Mirror of https://github.com/OpenNebula/one.git (synced 2024-12-23 17:33:56 +03:00)

Merge branch 'master' into feature-476

Commit 408d3f5fe1
@@ -9,7 +9,7 @@ distributed data center infrastructures.

Complete documentation can be found at
http://opennebula.org/documentation:rel2.0
http://opennebula.org/documentation:rel2.4

## INSTALLATION

@@ -111,7 +111,7 @@ where **install_options** can be one or more of:

## CONFIGURATION

Information on how to configure OpenNebula is located at http://opennebula.org/documentation:rel2.0
Information on how to configure OpenNebula is located at http://opennebula.org/documentation:rel2.4

## CONTACT

@@ -205,6 +205,7 @@ build_scripts=[
    'src/um/SConstruct',
    'src/authm/SConstruct',
    'src/xml/SConstruct',
    'share/man/SConstruct'
]

# Testing
@@ -39,6 +39,17 @@ public:
                       attribute_name.end(),
                       attribute_name.begin(),
                       (int(*)(int))toupper);

        // FIX Attribute name if it does not conform XML element
        // naming conventions
        int size = attribute_name.size();

        if ((size >0 && !isalpha(aname[0]))||
            (size >=3 && (aname[0]=='X' && aname[1]=='M' && aname[2]=='L')))
        {
            attribute_name.insert(0,"ONE_");
        }
    };

    virtual ~Attribute(){};
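The constructor change above forces attribute names to be usable as XML element names: the name is upper-cased and, when it starts with a non-alphabetic character or with the reserved "XML" prefix, "ONE_" is prepended. A minimal standalone sketch of the same rule, using only the standard library (the helper name is illustrative, not part of the OpenNebula API):

    #include <algorithm>
    #include <cctype>
    #include <string>

    // Illustrative helper: normalize a name the same way the Attribute
    // constructor does -- uppercase, then prefix "ONE_" when the result would
    // not be a valid XML element name.
    static std::string to_xml_element_name(std::string name)
    {
        std::transform(name.begin(), name.end(), name.begin(),
                       [](unsigned char c) { return std::toupper(c); });

        bool bad_start = !name.empty() &&
                         !std::isalpha(static_cast<unsigned char>(name[0]));
        bool xml_start = name.size() >= 3 && name.compare(0, 3, "XML") == 0;

        if (bad_start || xml_start)
        {
            name.insert(0, "ONE_");
        }

        return name;
    }

    // Example: to_xml_element_name("1disk") -> "ONE_1DISK",
    //          to_xml_element_name("xmlns") -> "ONE_XMLNS".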
@@ -457,13 +457,6 @@ private:
     */
    void do_action(const string &name, void *args){};

    /**
     *  Base 64 encoding
     *    @param in the string to encoded
     *    @return a pointer to the encoded string (must be freed) or 0 in case of
     *    error
     */
    static string * base64_encode(const string& in);
};
@@ -103,6 +103,15 @@ public:
        return source;
    }

    /**
     *  Returns the source path of the image
     *    @return source of image
     */
    void set_source(const string& _source)
    {
        source = _source;
    }

    /**
     *  Returns the type of the image
     *    @return type

@@ -250,14 +259,6 @@ public:
     */
    int disk_attribute(VectorAttribute * disk, int* index, ImageType* img_type);

    /**
     *  Generates the source path for the repository.
     *    @param uid of the image owner
     *    @param name of the image
     *    @return source for the image
     */
    static string generate_source(int uid, const string& name);

private:

    // -------------------------------------------------------------------------
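The hunk above only declares generate_source(); its body is not part of this excerpt. One plausible shape, sketched under the assumption that the path combines the configured repository prefix with a digest of "<uid>:<name>" built with the SSLTools helpers introduced later in this commit -- the scheme used by the committed code may differ:

    #include <sstream>
    #include <string>

    #include "SSLTools.h"

    // Illustrative sketch only: repository path = <prefix>/<sha1(uid:name)>.
    static std::string generate_source_sketch(const std::string& source_prefix,
                                              int uid, const std::string& name)
    {
        std::ostringstream hash;
        std::ostringstream source;

        hash << uid << ":" << name;

        source << source_prefix << "/" << SSLTools::sha1_digest(hash.str());

        return source.str();
    }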
@@ -70,12 +70,13 @@ private:
     */
    //Template driver_conf;

    void cp(int oid, const string& source, const string& destination) const;
    void cp(int oid, const string& source) const;

    /**
     *  Sends a move request to the MAD: "MV IMAGE_ID SRC_PATH DST_PATH"
     *    @param oid the image id.
     *    @param destination is the path to the image to be created
     *    @param destination is a driver specific location or "-" if not
     *    initialized
     *    @param size_mb of the image to be created
     */
    void mv(int oid, const string& source, const string& destination) const;

@@ -83,12 +84,10 @@ private:
    /**
     *  Sends a make filesystem request to the MAD: "MKFS IMAGE_ID PATH SIZE_MB"
     *    @param oid the image id.
     *    @param destination is the path to the image to be created
     *    @param fs type
     *    @param size_mb of the image to be created
     */
    void mkfs(int oid,
              const string& destination,
              const string& fs,
              const string& size_mb) const;
    /**
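The comments above spell out the one-line text protocol spoken to the image MAD: "<ACTION> <IMAGE_ID> <ARGS...>". A small, self-contained sketch of how such a request line can be composed (the helper is illustrative; the real class hands the line to the driver through the MAD machinery rather than returning it):

    #include <sstream>
    #include <string>

    // Build the "MKFS IMAGE_ID PATH SIZE_MB" request described above.
    // (The committed driver call may also pass the filesystem type; the
    // documented format is used here.)
    static std::string mkfs_request(int oid,
                                    const std::string& destination,
                                    const std::string& size_mb)
    {
        std::ostringstream os;

        os << "MKFS " << oid << " " << destination << " " << size_mb;

        return os.str();   // e.g. "MKFS 42 /var/lib/one/images/abc 1024"
    }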
@@ -39,7 +39,6 @@ class ImagePool : public PoolSQL
public:

    ImagePool(SqlDB * db,
              const string& _source_prefix,
              const string& _default_type,
              const string& _default_dev_prefix);

@@ -146,11 +145,6 @@
     */
    void authorize_disk(VectorAttribute * disk, int uid, AuthRequest * ar);

    static const string& source_prefix()
    {
        return _source_prefix;
    };

    static const string& default_type()
    {
        return _default_type;

@@ -165,10 +159,6 @@ private:
    //--------------------------------------------------------------------------
    // Configuration Attributes for Images
    // -------------------------------------------------------------------------
    /**
     *  Path to the image repository
     **/
    static string _source_prefix;

    /**
     *  Default image type
@@ -43,11 +43,11 @@ class MySqlDB : public SqlDB
{
public:

    MySqlDB(const string& server,
            int port,
            const string& user,
            const string& password,
            const char * database);
    MySqlDB(const string& _server,
            int _port,
            const string& _user,
            const string& _password,
            const string& _database);

    ~MySqlDB();

@@ -81,6 +81,19 @@ private:
     */
    MYSQL * db;

    /**
     *  MySQL Connection parameters
     */
    string server;

    int    port;

    string user;

    string password;

    string database;

    /**
     *  Fine-grain mutex for DB access
     */

@@ -113,7 +126,7 @@ public:
            int    port,
            string user,
            string password,
            const char * database)
            string database)
    {
        throw runtime_error("Aborting oned, MySQL support not compiled!");
    };
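The new constructor takes the database name as a string and the class now stores all connection parameters as members. A self-contained sketch of how such a constructor can open the connection with the MySQL C API, assuming the member names shown above (the real class additionally creates the database if missing and serializes access with a mutex):

    #include <mysql.h>

    #include <stdexcept>
    #include <string>

    using namespace std;

    // Illustrative only -- not the committed MySqlDB implementation.
    class MySqlDBSketch
    {
    public:
        MySqlDBSketch(const string& _server, int _port, const string& _user,
                      const string& _password, const string& _database)
            : server(_server), port(_port), user(_user),
              password(_password), database(_database)
        {
            db = mysql_init(0);

            // Connect without selecting a schema; the database can be
            // created and selected afterwards.
            if (mysql_real_connect(db, server.c_str(), user.c_str(),
                                   password.c_str(), 0, port, 0, 0) == 0)
            {
                throw runtime_error("Could not connect to MySQL server.");
            }
        }

        ~MySqlDBSketch()
        {
            mysql_close(db);
        }

    private:
        MYSQL * db;

        string server;
        int    port;
        string user;
        string password;
        string database;
    };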
@@ -37,7 +37,9 @@
#include "AuthManager.h"
#include "ImageManager.h"

class Nebula
#include "Callbackable.h"

class Nebula : public Callbackable
{
public:

@@ -222,6 +224,11 @@ public:
        return "OpenNebula 2.3.0";
    };

    static int db_version()
    {
        return 1;
    };

    void start();

    void get_configuration_attribute(

@@ -424,6 +431,28 @@ private:
    // ---------------------------------------------------------------

    friend void nebula_signal_handler (int sig);

    /**
     *  Bootstraps the database control tables
     */
    void bootstrap();

    /**
     *  Callback function to TODO
     *    @param _loaded_db_version TODO
     *    @param num the number of columns read from the DB
     *    @param names the column names
     *    @param vaues the column values
     *    @return 0 on success
     */
    int select_cb(void *_loaded_db_version, int num, char **values,
                  char **names);

    /*
     *  TODO
     *    @return 0 ok, -1 version mismatch, -2 needs bootstrap
     */
    int check_db_version();
};

#endif /*NEBULA_H_*/
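select_cb and check_db_version are still documented as TODO in this hunk, but their contract is stated: the callback receives the columns of a SELECT, and check_db_version returns 0 (ok), -1 (version mismatch) or -2 (needs bootstrap). A self-contained sketch of that pattern; the single-column result layout is an assumption for illustration:

    #include <cstdlib>
    #include <iostream>

    // Callback shape used by the SqlDB layer: copy the stored schema version
    // (first column of the result) into the int passed through the opaque
    // pointer.
    static int select_cb(void * _loaded_db_version, int num, char ** values,
                         char ** names)
    {
        int * loaded = static_cast<int *>(_loaded_db_version);

        if (num == 0 || values == 0 || values[0] == 0)
        {
            return -1;                  // nothing stored yet
        }

        *loaded = std::atoi(values[0]);

        return 0;
    }

    // Mirror of the documented return codes: 0 ok, -1 version mismatch,
    // -2 needs bootstrap. db_version() returns 1 in this commit.
    static int check_db_version(int loaded_db_version, int compiled_db_version)
    {
        if (loaded_db_version == 0)
        {
            return -2;
        }

        return (loaded_db_version == compiled_db_version) ? 0 : -1;
    }

    int main()
    {
        int  loaded    = 0;
        char version[] = "1";
        char * row[]   = { version };

        select_cb(&loaded, 1, row, 0);  // simulate one row from the DB
        std::cout << check_db_version(loaded, 1) << std::endl;   // prints 0
    }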
@@ -104,6 +104,14 @@ public:
        pthread_mutex_unlock(&mutex);
    };

    /**
     *  Function to print the object into a string in XML format
     *  base64 encoded
     *    @param xml the resulting XML string
     *    @return a reference to the generated string
     */
    virtual string& to_xml64(string &xml64);

    /**
     *  Function to print the object into a string in XML format
     *    @param xml the resulting XML string

@@ -189,11 +197,10 @@ public:
        const string& name,
        const string& value)
    {
        SingleAttribute * sattr;
        SingleAttribute * sattr = new SingleAttribute(name,value);

        obj_template->erase(name);
        obj_template->erase(sattr->name());

        sattr = new SingleAttribute(name,value);
        obj_template->set(sattr);

        return 0;
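to_xml64 is documented above as the base64-encoded form of the plain XML rendering. A hedged, self-contained sketch of the conversion it has to perform, written against the SSLTools::base64_encode interface introduced by this commit (the free-function name is illustrative):

    #include <string>

    #include "SSLTools.h"

    // Take an already rendered XML string and return it base64 encoded,
    // or an empty string when encoding fails.
    static std::string xml_to_xml64(const std::string& xml)
    {
        std::string * encoded = SSLTools::base64_encode(xml);

        if (encoded == 0)
        {
            return "";
        }

        std::string xml64 = *encoded;

        delete encoded;   // base64_encode returns a heap string ("must be freed")

        return xml64;
    }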
@@ -47,7 +47,7 @@ public:
     *    counter). If null the OID counter is not updated.
     *    @param with_uid the Pool objects have an owner id (uid)
     */
    PoolSQL(SqlDB * _db, const char * table);
    PoolSQL(SqlDB * _db, const char * _table);

    virtual ~PoolSQL();

@@ -189,6 +189,11 @@ private:
     */
    int lastOID;

    /**
     *  Tablename for this pool
     */
    string table;

    /**
     *  The pool is implemented with a Map of SQL object pointers, using the
     *  OID as key.

@@ -253,6 +258,11 @@ private:
        return key.str();
    };

    /**
     *  Inserts the last oid into the pool_control table
     */
    void update_lastOID();

    /* ---------------------------------------------------------------------- */
    /* ---------------------------------------------------------------------- */
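update_lastOID persists the in-memory OID counter into the pool_control table named in the comment above. A self-contained sketch of the statement it has to issue; the column names are assumptions made for illustration, not taken from this diff:

    #include <sstream>
    #include <string>

    // Illustrative only: build the SQL that stores the last allocated OID for
    // a pool. Column names (tablename, last_oid) are assumed.
    static std::string update_lastOID_sql(const std::string& table, int last_oid)
    {
        std::ostringstream oss;

        oss << "REPLACE INTO pool_control (tablename, last_oid) VALUES ('"
            << table << "'," << last_oid << ")";

        return oss.str();
    }

    // update_lastOID_sql("image_pool", 42) ->
    //   REPLACE INTO pool_control (tablename, last_oid) VALUES ('image_pool',42)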
include/SSLTools.h (new file, 51 lines)

@@ -0,0 +1,51 @@
/* ------------------------------------------------------------------------ */
/* Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org)            */
/*                                                                           */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may   */
/* not use this file except in compliance with the License. You may obtain   */
/* a copy of the License at                                                  */
/*                                                                           */
/* http://www.apache.org/licenses/LICENSE-2.0                                */
/*                                                                           */
/* Unless required by applicable law or agreed to in writing, software       */
/* distributed under the License is distributed on an "AS IS" BASIS,         */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  */
/* See the License for the specific language governing permissions and       */
/* limitations under the License.                                            */
/* -------------------------------------------------------------------------*/

#ifndef SSL_TOOLS_H_
#define SSL_TOOLS_H_

#include <string>

using namespace std;

/**
 *  The SSLTools class provides a simple interface to common SSL utils used
 *  in OpenNebula
 */
class SSLTools
{
public:
    /**
     *  sha1 digest
     *    @param in the string to be hashed
     *    @return sha1 hash of str
     */
    static string sha1_digest(const string& in);

    /**
     *  Base 64 encoding
     *    @param in the string to encoded
     *    @return a pointer to the encoded string (must be freed) or 0 in case of
     *    error
     */
    static string * base64_encode(const string& in);

private:
    SSLTools(){};
    ~SSLTools(){};
};

#endif /*SSL_TOOLS_H_*/
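The AuthManager change at the very end of this diff replaces its private helpers with this header while keeping the OpenSSL includes (sha, evp, bio, buffer). A hedged sketch of how these two utilities are commonly built on those OpenSSL primitives; this is not the committed SSLTools.cc, which is not part of the excerpt:

    #include <openssl/sha.h>
    #include <openssl/bio.h>
    #include <openssl/evp.h>
    #include <openssl/buffer.h>

    #include <iomanip>
    #include <sstream>
    #include <string>

    #include "SSLTools.h"

    // Hex-encoded SHA1 of the input string.
    string SSLTools::sha1_digest(const string& in)
    {
        unsigned char md[SHA_DIGEST_LENGTH];
        ostringstream oss;

        SHA1(reinterpret_cast<const unsigned char *>(in.c_str()), in.size(), md);

        for (unsigned int i = 0; i < SHA_DIGEST_LENGTH; i++)
        {
            oss << setfill('0') << setw(2) << hex
                << static_cast<unsigned int>(md[i]);
        }

        return oss.str();
    }

    // Base64 encoding through a memory BIO; the caller must delete the result.
    string * SSLTools::base64_encode(const string& in)
    {
        BIO *     bio_mem;
        BIO *     bio_64;
        BUF_MEM * buffer;

        bio_64  = BIO_new(BIO_f_base64());
        bio_mem = BIO_new(BIO_s_mem());

        BIO_push(bio_64, bio_mem);
        BIO_set_flags(bio_64, BIO_FLAGS_BASE64_NO_NL);   // single-line output

        if (BIO_write(bio_64, in.c_str(), in.length()) <= 0)
        {
            BIO_free_all(bio_64);
            return 0;
        }

        (void) BIO_flush(bio_64);

        BIO_get_mem_ptr(bio_64, &buffer);

        string * encoded = new string(buffer->data, buffer->length);

        BIO_free_all(bio_64);

        return encoded;
    }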
@@ -94,13 +94,6 @@ public:
     **/
    static int split_secret(const string secret, string& user, string& pass);

    /**
     *  "Encrypts" the password with SHA1 digest
     *    @param password
     *    @return sha1 encrypted password
     */
    static string sha1_digest(const string& pass);

private:
    // -------------------------------------------------------------------------
    // Friends
@@ -97,7 +97,6 @@ public:
    virtual UserPool* create_upool(SqlDB* db);

    virtual ImagePool* create_ipool( SqlDB* db,
                                     string repository_path,
                                     string default_image_type,
                                     string default_device_prefix);
@@ -57,7 +57,7 @@ public:
        if (mysql)
        {
            db = new MySqlDB( "localhost",0,
                              "oneadmin","oneadmin",NULL);
                              "oneadmin","oneadmin",db_name);

            ostringstream oss1;
            oss1 << "DROP DATABASE IF EXISTS " << db_name;
install.sh (90 lines changed)
@@ -31,8 +31,8 @@ usage() {
 echo "Usage: install.sh [-u install_user] [-g install_group] [-k keep conf]"
 echo "                  [-d ONE_LOCATION] [-c occi|ec2] [-r] [-h]"
 echo
 echo "-u: user that will run opennebula, defults to user executing install.sh"
 echo "-g: group of the user that will run opennebula, defults to user"
 echo "-u: user that will run opennebula, defaults to user executing install.sh"
 echo "-g: group of the user that will run opennebula, defaults to user"
 echo "    executing install.sh"
 echo "-k: keep configuration files of existing OpenNebula installation, useful"
 echo "    when upgrading. This flag should not be set when installing"

@@ -98,7 +98,7 @@ if [ -z "$ROOT" ] ; then
    LOCK_LOCATION="/var/lock/one"
    INCLUDE_LOCATION="/usr/include"
    SHARE_LOCATION="/usr/share/one"
    MAN_LOCATION="/usr/share/man/man8"
    MAN_LOCATION="/usr/share/man/man1"

    if [ "$CLIENT" = "yes" ]; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION"

@@ -133,7 +133,7 @@ else
    IMAGES_LOCATION="$VAR_LOCATION/images"
    INCLUDE_LOCATION="$ROOT/include"
    SHARE_LOCATION="$ROOT/share"
    MAN_LOCATION="$ROOT/share/man/man8"
    MAN_LOCATION="$ROOT/share/man/man1"

    if [ "$CLIENT" = "yes" ]; then
        MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION"
@ -181,21 +181,13 @@ LIB_DIRS="$LIB_LOCATION/ruby \
|
||||
$LIB_LOCATION/ruby/cloud/econe \
|
||||
$LIB_LOCATION/ruby/cloud/econe/views \
|
||||
$LIB_LOCATION/ruby/cloud/occi \
|
||||
$LIB_LOCATION/onedb \
|
||||
$LIB_LOCATION/tm_commands \
|
||||
$LIB_LOCATION/tm_commands/nfs \
|
||||
$LIB_LOCATION/tm_commands/ssh \
|
||||
$LIB_LOCATION/tm_commands/dummy \
|
||||
$LIB_LOCATION/tm_commands/lvm \
|
||||
$LIB_LOCATION/mads \
|
||||
$LIB_LOCATION/remotes \
|
||||
$LIB_LOCATION/remotes/im \
|
||||
$LIB_LOCATION/remotes/im/kvm.d \
|
||||
$LIB_LOCATION/remotes/im/xen.d \
|
||||
$LIB_LOCATION/remotes/im/ganglia.d \
|
||||
$LIB_LOCATION/remotes/vmm/xen \
|
||||
$LIB_LOCATION/remotes/vmm/kvm \
|
||||
$LIB_LOCATION/remotes/image \
|
||||
$LIB_LOCATION/remotes/image/fs \
|
||||
$LIB_LOCATION/sh"
|
||||
|
||||
VAR_DIRS="$VAR_LOCATION/remotes \
|
||||
@ -258,9 +250,11 @@ INSTALL_FILES=(
|
||||
LIB_FILES:$LIB_LOCATION
|
||||
RUBY_LIB_FILES:$LIB_LOCATION/ruby
|
||||
RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/OpenNebula
|
||||
MAD_RUBY_LIB_FILES:$LIB_LOCATION/ruby
|
||||
MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes
|
||||
MAD_SH_LIB_FILES:$LIB_LOCATION/sh
|
||||
MAD_SH_LIB_FILES:$LIB_LOCATION/remotes
|
||||
MAD_SH_LIB_FILES:$VAR_LOCATION/remotes
|
||||
ONEDB_MIGRATOR_FILES:$LIB_LOCATION/onedb
|
||||
MADS_LIB_FILES:$LIB_LOCATION/mads
|
||||
IM_PROBES_FILES:$VAR_LOCATION/remotes/im
|
||||
IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
|
||||
@ -272,23 +266,13 @@ INSTALL_FILES=(
|
||||
VMM_SSH_XEN_KVM_POLL:$VAR_LOCATION/remotes/vmm/xen/poll
|
||||
VMM_SSH_GANGLIA_POLL:$VAR_LOCATION/remotes/vmm/kvm/poll_local
|
||||
VMM_SSH_GANGLIA_POLL:$VAR_LOCATION/remotes/vmm/xen/poll_local
|
||||
IM_PROBES_FILES:$LIB_LOCATION/remotes/im
|
||||
IM_PROBES_KVM_FILES:$LIB_LOCATION/remotes/im/kvm.d
|
||||
IM_PROBES_XEN_FILES:$LIB_LOCATION/remotes/im/xen.d
|
||||
IM_PROBES_GANGLIA_FILES:$LIB_LOCATION/remotes/im/ganglia.d
|
||||
VMM_SSH_KVM_SCRIPTS:$LIB_LOCATION/remotes/vmm/kvm
|
||||
VMM_SSH_XEN_SCRIPTS:$LIB_LOCATION/remotes/vmm/xen
|
||||
VMM_SSH_XEN_KVM_POLL:$LIB_LOCATION/remotes/vmm/kvm/poll
|
||||
VMM_SSH_XEN_KVM_POLL:$LIB_LOCATION/remotes/vmm/xen/poll
|
||||
VMM_SSH_GANGLIA_POLL:$LIB_LOCATION/remotes/vmm/kvm/poll_local
|
||||
VMM_SSH_GANGLIA_POLL:$LIB_LOCATION/remotes/vmm/xen/poll_local
|
||||
NFS_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/nfs
|
||||
SSH_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/ssh
|
||||
DUMMY_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/dummy
|
||||
LVM_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/lvm
|
||||
IMAGE_DRIVER_FS_SCRIPTS:$LIB_LOCATION/remotes/image/fs
|
||||
IMAGE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/image/fs
|
||||
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
|
||||
INSTALL_NOVNC_SHARE_FILE:$SHARE_LOCATION
|
||||
TM_EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples/tm
|
||||
HOOK_SHARE_FILES:$SHARE_LOCATION/hooks
|
||||
COMMON_CLOUD_LIB_FILES:$LIB_LOCATION/ruby/cloud
|
||||
@ -350,6 +334,7 @@ INSTALL_ETC_FILES=(
|
||||
ECO_ETC_TEMPLATE_FILES:$ETC_LOCATION/ec2query_templates
|
||||
OCCI_ETC_FILES:$ETC_LOCATION
|
||||
OCCI_ETC_TEMPLATE_FILES:$ETC_LOCATION/occi_templates
|
||||
SUNSTONE_ETC_FILES:$ETC_LOCATION
|
||||
)
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
@ -365,6 +350,7 @@ BIN_FILES="src/nebula/oned \
|
||||
src/cli/oneimage \
|
||||
src/cli/onecluster \
|
||||
src/cli/onetemplate \
|
||||
src/cli/onedb \
|
||||
share/scripts/one \
|
||||
src/authm_mad/oneauth"
|
||||
|
||||
@@ -415,11 +401,12 @@ RUBY_OPENNEBULA_LIB_FILES="src/oca/ruby/OpenNebula/Host.rb \

#-----------------------------------------------------------------------------
# MAD ShellScript library files, to be installed under $LIB_LOCATION/sh
# MAD Script library files, to be installed under $LIB_LOCATION/<script lang>
# and remotes directory
#-----------------------------------------------------------------------------

MAD_SH_LIB_FILES="src/mad/sh/scripts_common.sh"
MAD_RUBY_LIB_FILES="src/mad/ruby/scripts_common.rb"

#-------------------------------------------------------------------------------
# Driver executable files, to be installed under $LIB_LOCATION/mads
@ -543,8 +530,14 @@ LVM_TM_COMMANDS_LIB_FILES="src/tm_mad/lvm/tm_clone.sh \
|
||||
IMAGE_DRIVER_FS_SCRIPTS="src/image_mad/remotes/fs/cp \
|
||||
src/image_mad/remotes/fs/mkfs \
|
||||
src/image_mad/remotes/fs/mv \
|
||||
src/image_mad/remotes/fs/fsrc \
|
||||
src/image_mad/remotes/fs/rm"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
|
||||
#-------------------------------------------------------------------------------
|
||||
ONEDB_MIGRATOR_FILES="src/onedb/1.rb"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Configuration files for OpenNebula, to be installed under $ETC_LOCATION
|
||||
#-------------------------------------------------------------------------------
|
||||
@ -638,6 +631,8 @@ HOOK_SHARE_FILES="share/hooks/ebtables-xen \
|
||||
share/hooks/host_error.rb \
|
||||
share/hooks/image.rb"
|
||||
|
||||
INSTALL_NOVNC_SHARE_FILE="share/install_novnc.sh"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Common Cloud Files
|
||||
#-------------------------------------------------------------------------------
|
||||
@ -743,6 +738,8 @@ SUNSTONE_FILES="src/sunstone/config.ru \
|
||||
|
||||
SUNSTONE_BIN_FILES="src/sunstone/bin/sunstone-server"
|
||||
|
||||
SUNSTONE_ETC_FILES="src/sunstone/etc/sunstone-server.conf"
|
||||
|
||||
SUNSTONE_MODELS_FILES="src/sunstone/models/OpenNebulaJSON.rb \
|
||||
src/sunstone/models/SunstoneServer.rb"
|
||||
|
||||
@ -753,6 +750,7 @@ SUNSTONE_MODELS_JSON_FILES="src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb \
|
||||
src/sunstone/models/OpenNebulaJSON/PoolJSON.rb \
|
||||
src/sunstone/models/OpenNebulaJSON/UserJSON.rb \
|
||||
src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb \
|
||||
src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb \
|
||||
src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb"
|
||||
|
||||
SUNSTONE_TEMPLATE_FILES="src/sunstone/templates/index.html \
|
||||
@ -768,6 +766,7 @@ SUNSTONE_PUBLIC_JS_PLUGINS_FILES="\
|
||||
src/sunstone/public/js/plugins/dashboard-tab.js \
|
||||
src/sunstone/public/js/plugins/hosts-tab.js \
|
||||
src/sunstone/public/js/plugins/images-tab.js \
|
||||
src/sunstone/public/js/plugins/templates-tab.js \
|
||||
src/sunstone/public/js/plugins/users-tab.js \
|
||||
src/sunstone/public/js/plugins/vms-tab.js \
|
||||
src/sunstone/public/js/plugins/vnets-tab.js"
|
||||
@ -825,7 +824,9 @@ SUNSTONE_PUBLIC_IMAGES_FILES="src/sunstone/public/images/ajax-loader.gif \
|
||||
src/sunstone/public/images/opennebula-sunstone-small.png \
|
||||
src/sunstone/public/images/panel.png \
|
||||
src/sunstone/public/images/pbar.gif \
|
||||
src/sunstone/public/images/Refresh-icon.png"
|
||||
src/sunstone/public/images/Refresh-icon.png \
|
||||
src/sunstone/public/images/vnc_off.png \
|
||||
src/sunstone/public/images/vnc_on.png"
|
||||
|
||||
SUNSTONE_RUBY_LIB_FILES="src/mad/ruby/CommandManager.rb \
|
||||
src/oca/ruby/OpenNebula.rb"
|
||||
@ -834,23 +835,24 @@ SUNSTONE_RUBY_LIB_FILES="src/mad/ruby/CommandManager.rb \
|
||||
# MAN files
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
MAN_FILES="share/man/oneauth.8.gz \
|
||||
share/man/onecluster.8.gz \
|
||||
share/man/onehost.8.gz \
|
||||
share/man/oneimage.8.gz \
|
||||
share/man/oneuser.8.gz \
|
||||
share/man/onevm.8.gz \
|
||||
share/man/onevnet.8.gz \
|
||||
share/man/onetemplate.8.gz \
|
||||
share/man/econe-describe-images.8.gz \
|
||||
share/man/econe-describe-instances.8.gz \
|
||||
share/man/econe-register.8.gz \
|
||||
share/man/econe-run-instances.8.gz \
|
||||
share/man/econe-terminate-instances.8.gz \
|
||||
share/man/econe-upload.8.gz \
|
||||
share/man/occi-compute.8.gz \
|
||||
share/man/occi-network.8.gz \
|
||||
share/man/occi-storage.8.gz"
|
||||
MAN_FILES="share/man/oneauth.1.gz \
|
||||
share/man/onecluster.1.gz \
|
||||
share/man/onehost.1.gz \
|
||||
share/man/oneimage.1.gz \
|
||||
share/man/oneuser.1.gz \
|
||||
share/man/onevm.1.gz \
|
||||
share/man/onevnet.1.gz \
|
||||
share/man/onetemplate.1.gz \
|
||||
share/man/onedb.1.gz \
|
||||
share/man/econe-describe-images.1.gz \
|
||||
share/man/econe-describe-instances.1.gz \
|
||||
share/man/econe-register.1.gz \
|
||||
share/man/econe-run-instances.1.gz \
|
||||
share/man/econe-terminate-instances.1.gz \
|
||||
share/man/econe-upload.1.gz \
|
||||
share/man/occi-compute.1.gz \
|
||||
share/man/occi-network.1.gz \
|
||||
share/man/occi-storage.1.gz"
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
#-----------------------------------------------------------------------------
|
||||
|
@@ -82,9 +82,6 @@ MAC_PREFIX = "02:00"

#*******************************************************************************
# Image Repository Configuration
#*******************************************************************************
# IMAGE_REPOSITORY_PATH: Define the path to the image repository, by default
# is set to $ONE_LOCATION/var/images
#
# DEFAULT_IMAGE_TYPE: This can take values
#       OS        Image file holding an operating system
#       CDROM     Image file holding a CDROM

@@ -96,8 +93,6 @@ MAC_PREFIX = "02:00"
#       xvd       XEN Virtual Disk
#       vd        KVM virtual disk
#*******************************************************************************

#IMAGE_REPOSITORY_PATH = /srv/cloud/var/images
DEFAULT_IMAGE_TYPE    = "OS"
DEFAULT_DEVICE_PREFIX = "hd"
@@ -328,11 +323,9 @@ IMAGE_MAD = [
#   command   : path can be absolute or relative to $ONE_LOCATION/share/hooks
#               case of self-contained installation or relative to
#               /usr/share/one/hooks in case of system-wide installation
#   arguments : for the hook. You can access to VM template variables with $
#                 - $ATTR, the value of an attribute e.g. $NAME or $VMID
#                 - $ATTR[VAR], the value of a vector e.g. $NIC[MAC]
#                 - $ATTR[VAR, COND], same of previous but COND select between
#                   multiple ATTRs e.g. $NIC[MAC, NETWORK="Public"]
#   arguments : for the hook. You can access to VM information with $
#                 - $VMID, the ID of the virtual machine
#                 - $TEMPLATE, the VM template in xml and base64 encoded
#   remote    : values,
#                 - YES, The hook is executed in the host where the VM was
#                        allocated

@@ -348,8 +341,9 @@ IMAGE_MAD = [
#   command   : path can be absolute or relative to $ONE_LOCATION/share/hooks
#               case of self-contained installation or relative to
#               /usr/share/one/hooks in case of system-wide installation
#   arguments : for the hook. You can use the Host ID with $HID to pass it as
#               argument for the hook
#   arguments : for the hook. You can use the following Host information:
#                 - $HID, the ID of the host
#                 - $TEMPLATE, the Host template in xml and base64 encoded
#   remote    : values,
#                 - YES, The hook is executed in the host
#                 - NO, The hook is executed in the OpenNebula server (default)
@@ -64,7 +64,7 @@ host.info
host_name = host.name

# Loop through all vms
vms = VirtualMachinePool.new(client)
vms = VirtualMachinePool.new(client, -2)
exit -1 if OpenNebula.is_error?(vms)

vms.info
share/install_novnc.sh (new executable file, 37 lines)
@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
NOVNC_TMP=/tmp/one/novnc-$(date "+%Y%m%d%H%M%S")
|
||||
|
||||
if [ -z "$ONE_LOCATION" ]; then
|
||||
ONE_SHARE=/usr/share/one
|
||||
ONE_PUBLIC_SUNSTONE=/usr/lib/one/sunstone/public
|
||||
SUNSTONE_CONF=/etc/one/sunstone-server.conf
|
||||
else
|
||||
ONE_SHARE=$ONE_LOCATION/share
|
||||
ONE_PUBLIC_SUNSTONE=$ONE_LOCATION/lib/sunstone/public
|
||||
SUNSTONE_CONF=$ONE_LOCATION/etc/sunstone-server.conf
|
||||
fi
|
||||
|
||||
mkdir -p $NOVNC_TMP
|
||||
wget -P $NOVNC_TMP --no-check-certificate http://github.com/kanaka/noVNC/tarball/master
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error downloading noVNC"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar=`ls -rt $NOVNC_TMP|tail -n1`
|
||||
tar -C $ONE_SHARE -mxvzf $NOVNC_TMP/$tar
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error untaring noVNC"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
dir=`ls -rt $ONE_SHARE|tail -n1`
|
||||
mv $ONE_SHARE/$dir $ONE_SHARE/noVNC
|
||||
|
||||
mkdir -p $ONE_PUBLIC_SUNSTONE/vendor/noVNC
|
||||
mv $ONE_SHARE/noVNC/include/ $ONE_PUBLIC_SUNSTONE/vendor/noVNC/
|
||||
|
||||
sed -i "s%^\(NOVNC_PATH=\)%\1$ONE_SHARE/noVNC%" $SUNSTONE_CONF
|
share/man/SConstruct (new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
# SConstruct for share/man
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
import os
|
||||
|
||||
os.system("gzip *.1 &>/dev/null")
|
New man page placeholders added (binary/empty files, content not shown):
share/man/econe-describe-images.1, share/man/econe-describe-instances.1,
share/man/econe-register.1, share/man/econe-run-instances.1,
share/man/econe-terminate-instances.1, share/man/econe-upload.1,
share/man/occi-compute.1, share/man/occi-network.1, share/man/occi-storage.1,
share/man/oneauth.1
share/man/onecluster.1 (new file, 76 lines)
@ -0,0 +1,76 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Cluster command
|
||||
.SH SYNOPSIS
|
||||
.B onecluster
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the OpenNebula administrator to manage clusters. The
|
||||
administrator can create, delete, as well as add and remove hosts from them.
|
||||
Any user can list available clusters.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns xml instead of human readable text
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBcreate\fR (Creates a new cluster)
|
||||
.IP
|
||||
onecluster create clustername
|
||||
.TP
|
||||
\fBdelete\fR (Removes a cluster)
|
||||
.IP
|
||||
onecluster delete <id>
|
||||
.TP
|
||||
\fBlist\fR (Lists all the clusters in the pool)
|
||||
.IP
|
||||
onecluster list
|
||||
.TP
|
||||
\fBaddhost\fR (Add a host to the cluster)
|
||||
.IP
|
||||
onecluster addhost <host_id> <cluster_id>
|
||||
.TP
|
||||
\fBremovehost\fR (Remove a host from the cluster)
|
||||
.IP
|
||||
onecluster removehost <host_id> <cluster_id>
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/onedb.1 (new file, 97 lines)
@ -0,0 +1,97 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Database command
|
||||
.SH SYNOPSIS
|
||||
.B onedb
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the user to manage the OpenNebula database. It provides
|
||||
information about the DB version, means to upgrade it to the latest version, and
|
||||
backup tools.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-force\fR
|
||||
Forces the backup even if the DB exists
|
||||
.TP
|
||||
\fB\-\-backup\fR file
|
||||
Use this file to store/read SQL dump
|
||||
.TP
|
||||
\fB\-s\fR, \fB\-\-sqlite\fR file
|
||||
SQLite DB file
|
||||
.TP
|
||||
\fB\-\-server\fR host
|
||||
MySQL server hostname or IP. Defaults to localhost
|
||||
.TP
|
||||
\fB\-\-port\fR port
|
||||
MySQL server port. Defaults to 3306
|
||||
.TP
|
||||
\fB\-\-user\fR username
|
||||
MySQL username
|
||||
.TP
|
||||
\fB\-\-passwd\fR password
|
||||
MySQL password. Leave unset to be prompted for it
|
||||
.TP
|
||||
\fB\-\-dbname\fR name
|
||||
MySQL DB name for OpenNebula
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.PP
|
||||
DB Connection options:
|
||||
.PP
|
||||
By default, onedb reads the connection data from oned.conf
|
||||
If any of these options is set, oned.conf is ignored (i.e. if you set MySQL's
|
||||
port onedb won't look for the rest of the options in oned.conf)
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBupgrade\fR (Upgrades the DB to the latest version)
|
||||
.IP
|
||||
onedb upgrade [<version>]
|
||||
.IP
|
||||
where <version> : DB version (e.g. 1, 3) to upgrade. By default the DB is
|
||||
.IP
|
||||
upgraded to the latest version
|
||||
.TP
|
||||
\fBversion\fR (Prints the current DB version. Use \fB\-v\fR flag to see also OpenNebula version)
|
||||
.IP
|
||||
onedb version
|
||||
.TP
|
||||
\fBhistory\fR (Prints the upgrades history)
|
||||
.IP
|
||||
onedb history
|
||||
.TP
|
||||
\fBbackup\fR (Dumps the DB to a file)
|
||||
.IP
|
||||
onedb backup [<output_file>]
|
||||
.IP
|
||||
where <output_file> : Same as \fB\-\-backup\fR
|
||||
.TP
|
||||
\fBrestore\fR (Restores the DB from a backup file. Only restores backups generated
|
||||
.IP
|
||||
from the same backend (SQLite or MySQL))
|
||||
.IP
|
||||
onedb restore [<backup_file>]
|
||||
.IP
|
||||
where <backup_file> : Same as \fB\-\-backup\fR
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/onehost.1 (new file, 112 lines)
@ -0,0 +1,112 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Host command
|
||||
.SH SYNOPSIS
|
||||
.B onehost
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the user to manage hosts in the Open Nebula server. It
|
||||
provides functionality to allocate, get information and delete a particular
|
||||
host or to list all the available hosts.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-d\fR, \fB\-\-delay\fR seconds
|
||||
Sets the delay in seconds for top
|
||||
command
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns xml instead of human readable text
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBcreate\fR (Adds a new machine to the pool)
|
||||
.IP
|
||||
onehost create <hostname> <im_mad> <vmm_mad> <tm_mad>
|
||||
.TP
|
||||
\fBshow\fR (Gets info from a host)
|
||||
.IP
|
||||
onehost show <host_id>
|
||||
.TP
|
||||
\fBdelete\fR (Removes a machine from the pool)
|
||||
.IP
|
||||
onehost delete <host_id>
|
||||
.TP
|
||||
\fBlist\fR (Lists machines in the pool)
|
||||
.IP
|
||||
onehost list
|
||||
.TP
|
||||
\fBenable\fR (Enables host)
|
||||
.IP
|
||||
onehost enable <host_id>
|
||||
.TP
|
||||
\fBdisable\fR (Disables host)
|
||||
.IP
|
||||
onehost disable <host_id>
|
||||
.TP
|
||||
\fBtop\fR (Lists hosts continuously)
|
||||
.IP
|
||||
onehost top
|
||||
.TP
|
||||
\fBsync\fR (synchronizes probes with remote hosts)
|
||||
.IP
|
||||
onehost sync
|
||||
.PP
|
||||
Information Columns:
|
||||
.TP
|
||||
\fBHID\fR Host ID
|
||||
.TP
|
||||
\fBNAME\fR Host name
|
||||
.TP
|
||||
\fBRVM\fR Number of running VMs
|
||||
.TP
|
||||
\fBTCPU\fR Total CPU (percentage)
|
||||
.TP
|
||||
\fBFCPU\fR Free CPU (percentage)
|
||||
.TP
|
||||
\fBACPU\fR Available CPU (not allocated by VMs)
|
||||
.TP
|
||||
\fBTMEM\fR Total memory
|
||||
.TP
|
||||
\fBFMEM\fR Free memory
|
||||
.TP
|
||||
\fBSTAT\fR Host status
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/oneimage.1 (new file, 133 lines)
@ -0,0 +1,133 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Image command
|
||||
.SH SYNOPSIS
|
||||
.B oneimage
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the user to manage images.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-d\fR, \fB\-\-delay\fR seconds
|
||||
Sets the delay in seconds for top
|
||||
command
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns xml instead of human readable text
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBregister\fR (Registers an image, copying it to the repository if it applies)
|
||||
.IP
|
||||
oneimage register <template>
|
||||
.IP
|
||||
template is a file name where the Image description is located
|
||||
.TP
|
||||
\fBaddattr\fR (Add a new image attribute)
|
||||
.IP
|
||||
oneimage addattr <image_id> <attribute_name> <attribute_value>
|
||||
.TP
|
||||
\fBupdate\fR (Modifies an image attribute)
|
||||
.IP
|
||||
oneimage update <image_id> <attribute_name> <attribute_value>
|
||||
.TP
|
||||
\fBrmattr\fR (Deletes an Image attribute)
|
||||
.IP
|
||||
oneimage rmattr <image_id> <attribute_name>
|
||||
.TP
|
||||
\fBenable\fR (Enabled an Image)
|
||||
.IP
|
||||
oneimage enable <image_id>
|
||||
.TP
|
||||
\fBdisable\fR (Disabled an Image)
|
||||
.IP
|
||||
oneimage disable <image_id>
|
||||
.TP
|
||||
\fBpublish\fR (Publish an Image)
|
||||
.IP
|
||||
oneimage publish <image_id>
|
||||
.TP
|
||||
\fBunpublish\fR (Unpublish an Image)
|
||||
.IP
|
||||
oneimage unpublish <image_id>
|
||||
.TP
|
||||
\fBpersistent\fR (Makes an Image persistent)
|
||||
.IP
|
||||
oneimage persistent <image_id>
|
||||
.TP
|
||||
\fBnonpersistent\fR (Makes an Image non persistent)
|
||||
.IP
|
||||
oneimage nonpersistent <image_id>
|
||||
.TP
|
||||
\fBlist\fR (Shows Images in the pool)
|
||||
.IP
|
||||
oneimage list <filter_flag>
|
||||
.IP
|
||||
where filter_flag can be
|
||||
.TP
|
||||
a, all
|
||||
\fB\-\-\fR> all the known Images
|
||||
.TP
|
||||
m, mine
|
||||
\fB\-\-\fR> the Images belonging to the user in ONE_AUTH
|
||||
.TP
|
||||
and all the
|
||||
Public Images
|
||||
.TP
|
||||
uid
|
||||
\fB\-\-\fR> Images of the user identified by this uid
|
||||
.TP
|
||||
user
|
||||
\fB\-\-\fR> Images of the user identified by the username
|
||||
.TP
|
||||
\fBtop\fR (Lists Images continuously)
|
||||
.IP
|
||||
oneimage top
|
||||
.TP
|
||||
\fBshow\fR (Gets information about an specific Image)
|
||||
.IP
|
||||
oneimage show <image_id>
|
||||
.TP
|
||||
\fBdelete\fR (Deletes an already deployed Image)
|
||||
.IP
|
||||
oneimage delete <image_id>
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/onetemplate.1 (new file, 117 lines)
@ -0,0 +1,117 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Template command
|
||||
.SH SYNOPSIS
|
||||
.B onetemplate
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the user to manage templates.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-d\fR, \fB\-\-delay\fR seconds
|
||||
Sets the delay in seconds for top
|
||||
command
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns xml instead of human readable text
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBcreate\fR (Registers a Template from a template file)
|
||||
.IP
|
||||
onetemplate create <file>
|
||||
.IP
|
||||
file is a file name where the Template description is located
|
||||
.TP
|
||||
\fBaddattr\fR (Add a new Template attribute)
|
||||
.IP
|
||||
onetemplate addattr <template_id> <attribute_name> <attribute_value>
|
||||
.TP
|
||||
\fBupdate\fR (Modifies a Template attribute)
|
||||
.IP
|
||||
onetemplate update <template_id> <attribute_name> <attribute_value>
|
||||
.TP
|
||||
\fBrmattr\fR (Deletes a Template attribute)
|
||||
.IP
|
||||
onetemplate rmattr <template_id> <attribute_name>
|
||||
.TP
|
||||
\fBpublish\fR (Publish a Template)
|
||||
.IP
|
||||
onetemplate publish <template_id>
|
||||
.TP
|
||||
\fBunpublish\fR (Unpublish an Template)
|
||||
.IP
|
||||
onetemplate unpublish <template_id>
|
||||
.TP
|
||||
\fBlist\fR (Shows Templates in the pool)
|
||||
.IP
|
||||
onetemplate list <filter_flag>
|
||||
.IP
|
||||
where filter_flag can be
|
||||
.TP
|
||||
a, all
|
||||
\fB\-\-\fR> all the known Templates
|
||||
.TP
|
||||
m, mine
|
||||
\fB\-\-\fR> the Templates belonging to the user in ONE_AUTH
|
||||
.TP
|
||||
and all the
|
||||
Public Templates
|
||||
.TP
|
||||
uid
|
||||
\fB\-\-\fR> Templates of the user identified by this uid
|
||||
.TP
|
||||
user
|
||||
\fB\-\-\fR> Templates of the user identified by the username
|
||||
.TP
|
||||
\fBtop\fR (Lists Templates continuously)
|
||||
.IP
|
||||
onetemplate top
|
||||
.TP
|
||||
\fBshow\fR (Gets information about an specific Template)
|
||||
.IP
|
||||
onetemplate show <template_id>
|
||||
.TP
|
||||
\fBdelete\fR (Deletes a Template)
|
||||
.IP
|
||||
onetemplate delete <template_id>
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/oneuser.1 (new file, 90 lines)
@ -0,0 +1,90 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula User command
|
||||
.SH SYNOPSIS
|
||||
.B oneuser
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the OpenNebula administrator to manage users, adding,
|
||||
listing and deleting them.
|
||||
.PP
|
||||
The create and passwd commands accept the [\-r, \fB\-\-read\-file]\fR option. Use this
|
||||
option to store the contents of a file (without hashing it) as the password.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns xml instead of human readable text
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-n\fR, \fB\-\-no\-hash\fR
|
||||
Store plain password into the database
|
||||
.TP
|
||||
\fB\-r\fR, \fB\-\-read\-file\fR
|
||||
Read password from file
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBcreate\fR (Creates a new user)
|
||||
.IP
|
||||
oneuser create username password
|
||||
.TP
|
||||
\fBdelete\fR (Removes a user)
|
||||
.IP
|
||||
oneuser delete <id>
|
||||
.TP
|
||||
\fBlist\fR (Lists all the users in the pool)
|
||||
.IP
|
||||
oneuser list
|
||||
.TP
|
||||
\fBpasswd\fR (Changes the given user's password)
|
||||
.IP
|
||||
oneuser passwd <id> password
|
||||
.PP
|
||||
Information Columns:
|
||||
.TP
|
||||
\fBUID\fR User ID
|
||||
.TP
|
||||
\fBNAME\fR Name of the user
|
||||
.TP
|
||||
\fBPASSWORD\fR SHA1 encrypted password
|
||||
.TP
|
||||
\fBENABLE\fR Whether the user is enabled or not
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/onevm.1 (new file, 235 lines)
@ -0,0 +1,235 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Virtual Machine command
|
||||
.SH SYNOPSIS
|
||||
.B onevm
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the user to manage virtual machines in OpenNebula.
|
||||
The user can allocate, deploy, migrate, suspend, resume and shutdown a virtual
|
||||
machine with the functionality present in onevm.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-d\fR, \fB\-\-delay\fR seconds
|
||||
Sets the delay in seconds for top
|
||||
command
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns xml instead of human readable text
|
||||
.TP
|
||||
\fB\-t\fR, \fB\-\-type\fR type
|
||||
Image type
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Tells more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBcreate\fR (Submits a new virtual machine, adding it to the ONE VM pool)
|
||||
.IP
|
||||
onevm create <template>
|
||||
.IP
|
||||
template is a file name where the VM description is located
|
||||
.TP
|
||||
\fBdeploy\fR (Starts an existing VM in an specific host)
|
||||
.IP
|
||||
onevm deploy <vm_id> <host_id>
|
||||
.IP
|
||||
States: PENDING
|
||||
.TP
|
||||
\fBshutdown\fR (Shuts down an already deployed VM)
|
||||
.IP
|
||||
onevm shutdown <vm_id>
|
||||
.IP
|
||||
States: RUNNING
|
||||
.TP
|
||||
\fBlivemigrate\fR (Migrates a running VM to another host without downtime)
|
||||
.IP
|
||||
onevm livemigrate <vm_id> <host_id>
|
||||
.IP
|
||||
States: RUNNING
|
||||
.TP
|
||||
\fBmigrate\fR (Saves a running VM and starts it again in the specified host)
|
||||
.IP
|
||||
onevm migrate <vm_id> <host_id>
|
||||
.IP
|
||||
States: RUNNING
|
||||
.TP
|
||||
\fBhold\fR (Sets a VM to hold state, scheduler will not deploy it)
|
||||
.IP
|
||||
onevm hold <vm_id>
|
||||
.IP
|
||||
States: PENDING
|
||||
.TP
|
||||
\fBrelease\fR (Releases a VM from hold state, setting it to pending)
|
||||
.IP
|
||||
onevm release <vm_id>
|
||||
.IP
|
||||
States: HOLD
|
||||
.TP
|
||||
\fBstop\fR (Stops a running VM)
|
||||
.IP
|
||||
onevm stop <vm_id>
|
||||
.IP
|
||||
States: RUNNING
|
||||
.TP
|
||||
\fBcancel\fR (Cancels a running VM)
|
||||
.IP
|
||||
onevm cancel <vm_id>
|
||||
.IP
|
||||
States: RUNNING
|
||||
.TP
|
||||
\fBsuspend\fR (Saves a running VM)
|
||||
.IP
|
||||
onevm suspend <vm_id>
|
||||
.IP
|
||||
States: RUNNING
|
||||
.TP
|
||||
\fBresume\fR (Resumes the execution of a saved VM)
|
||||
.IP
|
||||
onevm resume <vm_id>
|
||||
.IP
|
||||
States: STOPPED, SUSPENDED
|
||||
.TP
|
||||
\fBsaveas\fR (Set the specified vms disk to be saved in a new image (image_name)
|
||||
.IP
|
||||
when the vm shuts down)
|
||||
.IP
|
||||
onevm saveas <vm_id> <disk_id> <image_name>
|
||||
.IP
|
||||
(Set a different type for the new Image)
|
||||
.IP
|
||||
onevm saveas <vm_id> <disk_id> <image_name> \fB\-t\fR/\-\-type <type>
|
||||
.TP
|
||||
\fBdelete\fR (Deletes a VM from the pool)
|
||||
.IP
|
||||
onevm delete <vm_id>
|
||||
.IP
|
||||
States: ANY
|
||||
.TP
|
||||
\fBrestart\fR (Forces a re\-deployment of a VM in UNKNOWN or BOOT state)
|
||||
.IP
|
||||
onevm restart <vm_id>
|
||||
.IP
|
||||
States: UNKNOWN, BOOT
|
||||
.TP
|
||||
\fBresubmit\fR (Resubmits a VM to PENDING state)
|
||||
.IP
|
||||
onevm resubmit <vm_id>
|
||||
.IP
|
||||
States: ANY, except SUSPENDED or DONE
|
||||
.TP
|
||||
\fBlist\fR (Shows VMs in the pool)
|
||||
.IP
|
||||
onevm list <filter_flag>
|
||||
.IP
|
||||
where filter_flag can be
|
||||
.TP
|
||||
a, all
|
||||
\fB\-\-\fR> all the known VMs
|
||||
.TP
|
||||
m, mine
|
||||
\fB\-\-\fR> the VMs belonging to the user in ONE_AUTH
|
||||
.TP
|
||||
uid
|
||||
\fB\-\-\fR> VMs of the user identified by this uid
|
||||
.TP
|
||||
user
|
||||
\fB\-\-\fR> VMs of the user identified by the username
|
||||
.TP
|
||||
\fBshow\fR (Gets information about a specific VM)
|
||||
.IP
|
||||
onevm show <vm_id>
|
||||
.TP
|
||||
\fBtop\fR (Lists VMs continuously)
|
||||
.IP
|
||||
onevm top
|
||||
.TP
|
||||
\fBhistory\fR (Gets history from VMs)
|
||||
.IP
|
||||
onevm history [<vm_id> <vm_id> ...]
|
||||
.IP
|
||||
if no vm_id is provided it will list history for all known VMs
|
||||
.PP
|
||||
Information Columns:
|
||||
.TP
|
||||
\fBID\fR ONE VM identifier
|
||||
.TP
|
||||
\fBUSER\fR Username of the VM owner
|
||||
.TP
|
||||
\fBNAME\fR Name of the VM
|
||||
.TP
|
||||
\fBSTAT\fR Status of the VM
|
||||
.TP
|
||||
\fBCPU\fR CPU percentage used by the VM
|
||||
.TP
|
||||
\fBMEM\fR Memory used by the VM
|
||||
.TP
|
||||
\fBHOSTNAME\fR Host where the VM is being or was run
|
||||
.TP
|
||||
\fBTIME\fR Time since the submission of the VM (days hours:minutes:seconds)
|
||||
.PP
|
||||
VM States:
|
||||
.TP
|
||||
\fBpend\fR pending
|
||||
.TP
|
||||
\fBhold\fR VM on hold (not runnable)
|
||||
.TP
|
||||
\fBstop\fR stopped
|
||||
.TP
|
||||
\fBsusp\fR suspended
|
||||
.TP
|
||||
\fBdone\fR finished
|
||||
.TP
|
||||
\fBprol\fR prolog
|
||||
.TP
|
||||
\fBboot\fR booting
|
||||
.TP
|
||||
\fBrunn\fR running
|
||||
.TP
|
||||
\fBmigr\fR migrating
|
||||
.TP
|
||||
\fBsave\fR saving the VM to disk
|
||||
.TP
|
||||
\fBepil\fR epilog
|
||||
.TP
|
||||
\fBshut\fR shutting down
|
||||
.TP
|
||||
\fBfail\fR failed
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
share/man/onevnet.1 (new file, 117 lines)
@ -0,0 +1,117 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
|
||||
.TH OPENNEBULAPRO "1" "May 2011" "OpenNebulaPro 2.2.0" "User Commands"
|
||||
.SH NAME
|
||||
OpenNebulaPro \- OpenNebula Virtual Network command
|
||||
.SH SYNOPSIS
|
||||
.B onevnet
|
||||
[\fI<options>\fR] \fI<command> \fR[\fI<parameters>\fR]
|
||||
.SH DESCRIPTION
|
||||
|
||||
This command enables the user to manage virtual networks in the OpenNebula
|
||||
server. It provides functionality to create, get information and delete a
|
||||
particular network or to list available and used IP's.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-list\fR x,y,z
|
||||
Selects columns to display with list
|
||||
command
|
||||
.TP
|
||||
\fB\-\-list\-columns\fR
|
||||
Information about the columns available
|
||||
to display, order or filter
|
||||
.TP
|
||||
\fB\-o\fR, \fB\-\-order\fR x,y,z
|
||||
Order by these columns, column starting
|
||||
with \- means decreasing order
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-filter\fR x,y,z
|
||||
Filter data. An array is specified
|
||||
with column=value pairs.
|
||||
.TP
|
||||
\fB\-x\fR, \fB\-\-xml\fR
|
||||
Returns XML instead of human-readable text
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
Prints more information if the command
|
||||
is successful
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
Shows this help message
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
Shows version and copyright information
|
||||
.SH COMMANDS
|
||||
.TP
|
||||
\fBcreate\fR (Creates a new virtual network)
|
||||
.IP
|
||||
onevnet create <template>
|
||||
.IP
|
||||
template is a filename where the virtual network is described
|
||||
.TP
|
||||
\fBshow\fR (Gets info from a virtual network)
|
||||
.IP
|
||||
onevnet show <network_id>
|
||||
.TP
|
||||
\fBpublish\fR (Publish a virtual network)
|
||||
.IP
|
||||
onevnet publish <network_id>
|
||||
.TP
|
||||
\fBunpublish\fR (Unpublish a virtual network)
|
||||
.IP
|
||||
onevnet unpublish <network_id>
|
||||
.TP
|
||||
\fBdelete\fR (Removes a virtual network)
|
||||
.IP
|
||||
onevnet delete <network_id>
|
||||
.TP
|
||||
\fBaddleases\fR (Adds a lease to the virtual network)
|
||||
.IP
|
||||
onevnet addleases <network_id> <IP> [<MAC>]
|
||||
.TP
|
||||
\fBrmleases\fR (Removes a lease from the virtual network)
|
||||
.IP
|
||||
onevnet rmleases <network_id> <IP>
|
||||
.TP
|
||||
\fBlist\fR (Lists virtual networks in the pool)
|
||||
.IP
|
||||
onevnet list <filter_flag>
|
||||
.IP
|
||||
where filter_flag can be
|
||||
.TP
|
||||
a, all
|
||||
: all the known VNs
|
||||
.TP
|
||||
m, mine
|
||||
: the VNs belonging to the user in ONE_AUTH and all the Public VNs
|
||||
.TP
|
||||
uid
|
||||
: VNs of the user identified by this uid
|
||||
.TP
|
||||
user
|
||||
: VNs of the user identified by the username
|
||||
.PP
|
||||
Information columns:
|
||||
.TP
|
||||
\fBNID\fR Network ID
|
||||
.TP
|
||||
\fBNAME\fR Name of the virtual network
|
||||
.TP
|
||||
\fBTYPE\fR Type of virtual network (0=ranged, 1=fixed)
|
||||
.TP
|
||||
\fBBRIDGE\fR Bridge associated to the virtual network
|
||||
.TP
|
||||
\fBLEASES\fR Number of leases used from this virtual network
|
||||
.SH COPYRIGHT
|
||||
Copyright 2010\-2011, C12G Labs S.L.
|
||||
.PP
|
||||
Licensed under the C12G Commercial Open\-source License (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License as part
|
||||
of the software distribution.
|
||||
.PP
|
||||
Unless agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
|
||||
OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and
|
||||
limitations under the License.
|
Binary file not shown.
@ -16,15 +16,10 @@
|
||||
|
||||
#include "AuthManager.h"
|
||||
#include "NebulaLog.h"
|
||||
#include "SSLTools.h"
|
||||
|
||||
#include "Nebula.h"
|
||||
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/hmac.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
@ -35,40 +30,6 @@ const char * AuthManager::auth_driver_name = "auth_exe";
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string * AuthRequest::base64_encode(const string& in)
|
||||
{
|
||||
BIO * bio_mem;
|
||||
BIO * bio_64;
|
||||
|
||||
char * encoded_c;
|
||||
long int size;
|
||||
|
||||
bio_64 = BIO_new(BIO_f_base64());
|
||||
bio_mem = BIO_new(BIO_s_mem());
|
||||
|
||||
BIO_push(bio_64, bio_mem);
|
||||
|
||||
BIO_set_flags(bio_64,BIO_FLAGS_BASE64_NO_NL);
|
||||
|
||||
BIO_write(bio_64, in.c_str(), in.length());
|
||||
|
||||
if (BIO_flush(bio_64) != 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
size = BIO_get_mem_data(bio_mem,&encoded_c);
|
||||
|
||||
string * encoded = new string(encoded_c,size);
|
||||
|
||||
BIO_free_all(bio_64);
|
||||
|
||||
return encoded;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void AuthRequest::add_auth(Object ob,
|
||||
const string& ob_id,
|
||||
Operation op,
|
||||
@ -91,7 +52,7 @@ void AuthRequest::add_auth(Object ob,
|
||||
|
||||
if (op == CREATE || op == INSTANTIATE) //encode the ob_id, it is a template
|
||||
{
|
||||
string * encoded_id = base64_encode(ob_id);
|
||||
string * encoded_id = SSLTools::base64_encode(ob_id);
|
||||
|
||||
if (encoded_id != 0)
|
||||
{
|
||||
|
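With this change the request template is still handed to the authorization drivers base64-encoded; only the encoder now lives in SSLTools. As a reference, a site-specific auth driver written in Ruby could recover the plain-text template roughly as sketched below (where the encoded template arrives on the command line is an assumption, not part of this patch):

#!/usr/bin/env ruby
# Sketch only: decode the base64-encoded template that AuthRequest::add_auth
# now produces through SSLTools::base64_encode for CREATE/INSTANTIATE requests.
# The argument position (ARGV[0]) is an assumption.
require 'base64'

encoded  = ARGV[0] || ''
template = Base64.decode64(encoded)

STDERR.puts "Decoded request template:"
STDERR.puts template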
@ -1,4 +1,4 @@
|
||||
# SConstruct for src/vm
|
||||
# SConstruct for src/authm
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
|
@ -17,10 +17,10 @@ Import('env')
|
||||
|
||||
env.Prepend(LIBS=[
|
||||
'nebula_template',
|
||||
'nebula_authm',
|
||||
'nebula_common',
|
||||
'nebula_core',
|
||||
'nebula_mad',
|
||||
'nebula_authm',
|
||||
'nebula_sql',
|
||||
'nebula_log',
|
||||
'crypto'
|
||||
|
@ -89,19 +89,43 @@ class Quota
|
||||
# Checks if the user is below resource limits. If new_vm is defined
|
||||
# checks if its requirements fit in limits
|
||||
def check(user, new_vm=nil)
|
||||
usage=@usage.total(user)
|
||||
use=@usage.total(user)
|
||||
use_after=use.clone
|
||||
user_quota=get(user)
|
||||
if new_vm
|
||||
usage.cpu+=new_vm.cpu.to_f
|
||||
usage.memory+=new_vm.memory.to_i
|
||||
usage.num_vms+=1
|
||||
use_after.cpu+=new_vm.cpu.to_f
|
||||
use_after.memory+=new_vm.memory.to_i
|
||||
use_after.num_vms+=1
|
||||
end
|
||||
|
||||
STDERR.puts [user_quota, use_after, new_vm].inspect
|
||||
|
||||
error_message=""
|
||||
|
||||
if !(!user_quota[:cpu] || use_after.cpu<=user_quota[:cpu])
|
||||
error_message<<"Cpu quota exceeded (Quota: #{user_quota[:cpu]}, "+
|
||||
"Used: #{use.cpu}"
|
||||
error_message<<", asked: #{new_vm.cpu.to_f}" if new_vm
|
||||
error_message<<")."
|
||||
end
|
||||
|
||||
if !(!user_quota[:memory] || use_after.memory<=user_quota[:memory])
|
||||
error_message<<" Memory quota exceeded (Quota: "+
|
||||
"#{user_quota[:memory]}, Used: #{use.memory}"
|
||||
error_message<<", asked: #{new_vm.memory.to_i}" if new_vm
|
||||
error_message<<")."
|
||||
end
|
||||
|
||||
if !(!user_quota[:num_vms] || use_after.num_vms<=user_quota[:num_vms])
|
||||
error_message<<" Num VMS quota exceeded (Quota: "+
|
||||
"#{user_quota[:memory]}, Used: #{use.num_vms})."
|
||||
end
|
||||
|
||||
if error_message==""
|
||||
false
|
||||
else
|
||||
error_message.strip
|
||||
end
|
||||
|
||||
STDERR.puts [user_quota, usage, new_vm].inspect
|
||||
|
||||
(!user_quota[:cpu] || usage.cpu<=user_quota[:cpu]) &&
|
||||
(!user_quota[:memory] || usage.memory<=user_quota[:memory]) &&
|
||||
(!user_quota[:num_vms] || usage.num_vms<=user_quota[:num_vms])
|
||||
end
|
||||
|
||||
# Updates user resource consumption
|
||||
|
@ -54,8 +54,8 @@ class SimplePermissions
|
||||
if @quota_enabled and object=='VM' and auth_result
|
||||
STDERR.puts 'quota enabled'
|
||||
@quota.update(uid.to_i)
|
||||
if !@quota.check(uid.to_i, get_vm_usage(id))
|
||||
auth_result="Quota exceeded"
|
||||
if message=@quota.check(uid.to_i, get_vm_usage(id))
|
||||
auth_result=message
|
||||
end
|
||||
end
|
||||
|
||||
|
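The hunk above relies on the new return convention of Quota#check: false while the user stays within the limits, and a truthy error String once some quota is exceeded. A minimal sketch of a caller honouring that contract (authorize_vm is a hypothetical helper, not part of the patch):

# Hypothetical caller of the new Quota#check contract shown above.
def authorize_vm(quota, uid, vm_usage)
    quota.update(uid)

    if message = quota.check(uid, vm_usage)
        return message   # e.g. "Cpu quota exceeded (Quota: 2.0, Used: 2.5, asked: 1.0)."
    end

    true                 # within limits, request allowed
end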
@ -60,8 +60,8 @@ describe 'Quota' do
|
||||
it 'should check for quotas' do
|
||||
@quota.update(0)
|
||||
@quota.update(1)
|
||||
@quota.check(0).should == true
|
||||
@quota.check(1).should == true
|
||||
@quota.check(0).should == false
|
||||
@quota.check(1).should == false
|
||||
|
||||
vms=@quota.get_user(0)
|
||||
vms[5]=VmUsage.new(40.0, 8192)
|
||||
@ -69,13 +69,13 @@ describe 'Quota' do
|
||||
vms=@quota.get_user(1)
|
||||
vms[6]=VmUsage.new(40.0, 8192)
|
||||
|
||||
@quota.check(0).should == false
|
||||
@quota.check(1).should == false
|
||||
@quota.check(0).class.should == String
|
||||
@quota.check(1).class.should == String
|
||||
|
||||
@quota.update(0)
|
||||
@quota.update(1)
|
||||
@quota.check(0).should == true
|
||||
@quota.check(1).should == true
|
||||
@quota.check(0).should == false
|
||||
@quota.check(1).should == false
|
||||
end
|
||||
|
||||
it 'should let update limits' do
|
||||
@ -86,8 +86,8 @@ describe 'Quota' do
|
||||
it 'should understand unlimited quotas' do
|
||||
vms=@quota.get_user(0)
|
||||
vms[7]=VmUsage.new(9999999999.0, 99999999999)
|
||||
@quota.check(0).should == true
|
||||
@quota.check(0, VmUsage.new(999999999.0, 99999999)).should == true
|
||||
@quota.check(0).should == false
|
||||
@quota.check(0, VmUsage.new(999999999.0, 99999999)).should == false
|
||||
end
|
||||
|
||||
end
|
||||
|
614
src/cli/onedb
Executable file
@ -0,0 +1,614 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- */
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
# not use this file except in compliance with the License. You may obtain */
|
||||
# a copy of the License at */
|
||||
# */
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
# */
|
||||
# Unless required by applicable law or agreed to in writing, software */
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
# See the License for the specific language governing permissions and */
|
||||
# limitations under the License. */
|
||||
# -------------------------------------------------------------------------- */
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Set up the environment
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
ONE_LOCATION = ENV["ONE_LOCATION"]
|
||||
|
||||
if !ONE_LOCATION
|
||||
LIB_LOCATION = "/usr/lib/one"
|
||||
RUBY_LIB_LOCATION = LIB_LOCATION + "/ruby"
|
||||
VAR_LOCATION = "/var/lib/one"
|
||||
ETC_LOCATION = "/etc/one"
|
||||
LOCK_FILE = "/var/lock/one/one"
|
||||
else
|
||||
LIB_LOCATION = ONE_LOCATION + "/lib"
|
||||
RUBY_LIB_LOCATION = LIB_LOCATION + "/ruby"
|
||||
VAR_LOCATION = ONE_LOCATION + "/var"
|
||||
ETC_LOCATION = ONE_LOCATION + "/etc"
|
||||
LOCK_FILE = VAR_LOCATION + "/.lock"
|
||||
end
|
||||
|
||||
$: << RUBY_LIB_LOCATION
|
||||
|
||||
|
||||
require 'OpenNebula'
|
||||
require 'command_parse'
|
||||
|
||||
# TODO: Move the Configuration file to OpenNebula ruby lib?
|
||||
require "#{RUBY_LIB_LOCATION}/cloud/Configuration"
|
||||
|
||||
require 'rubygems'
|
||||
require 'sequel'
|
||||
|
||||
class MigratorBase
|
||||
attr_reader :db_version
|
||||
attr_reader :one_version
|
||||
@verbose
|
||||
|
||||
def initialize(db, verbose)
|
||||
@db = db
|
||||
@verbose = verbose
|
||||
end
|
||||
|
||||
def up
|
||||
puts "Method up not implemented for version #{@version}"
|
||||
return false
|
||||
end
|
||||
end
|
||||
|
||||
class OneDBParse < CommandParse
|
||||
|
||||
COMMANDS_HELP=<<-EOT
|
||||
DB Connection options:
|
||||
|
||||
By default, onedb reads the connection data from oned.conf
|
||||
If any of these options is set, oned.conf is ignored (i.e. if you set MySQL's
|
||||
port, onedb won't look for the rest of the options in oned.conf)
|
||||
|
||||
Description:
|
||||
|
||||
This command enables the user to manage the OpenNebula database. It provides
|
||||
information about the DB version, means to upgrade it to the latest version, and
|
||||
backup tools.
|
||||
|
||||
Commands:
|
||||
|
||||
* upgrade (Upgrades the DB to the latest version)
|
||||
onedb upgrade [<version>]
|
||||
|
||||
where <version> : DB version (e.g. 1, 3) to upgrade. By default the DB is
|
||||
upgraded to the latest version
|
||||
|
||||
* version (Prints the current DB version. Use -v flag to see also OpenNebula version)
|
||||
onedb version
|
||||
|
||||
* history (Prints the upgrades history)
|
||||
onedb history
|
||||
|
||||
* backup (Dumps the DB to a file)
|
||||
onedb backup [<output_file>]
|
||||
|
||||
where <output_file> : Same as --backup
|
||||
|
||||
* restore (Restores the DB from a backup file. Only restores backups generated
|
||||
from the same backend (SQLite or MySQL))
|
||||
onedb restore [<backup_file>]
|
||||
|
||||
where <backup_file> : Same as --backup
|
||||
|
||||
EOT
|
||||
|
||||
def text_commands
|
||||
COMMANDS_HELP
|
||||
end
|
||||
|
||||
def text_command_name
|
||||
"onedb"
|
||||
end
|
||||
|
||||
def special_options(opts, options)
|
||||
opts.on_tail("-f", "--force", "Forces the backup even if the DB exists") do |o|
|
||||
options[:force] = true
|
||||
end
|
||||
|
||||
opts.on_tail("--backup file", "Use this file to store/read SQL dump", String) do |o|
|
||||
options[:backup] = o
|
||||
end
|
||||
|
||||
opts.on_tail("-s file", "--sqlite file", "SQLite DB file", String) do |o|
|
||||
options[:backend] = :sqlite
|
||||
options[:sqlite] = o
|
||||
end
|
||||
|
||||
opts.on_tail("--server host", "MySQL server hostname or IP. Defaults "<<
|
||||
"to localhost", String) do |o|
|
||||
options[:backend] = :mysql
|
||||
options[:server] = o
|
||||
end
|
||||
|
||||
opts.on_tail("--port port", "MySQL server port. Defaults to 3306", Integer) do |o|
|
||||
options[:backend] = :mysql
|
||||
options[:port] = o
|
||||
end
|
||||
|
||||
opts.on_tail("--user username", "MySQL username", String) do |o|
|
||||
options[:backend] = :mysql
|
||||
options[:user] = o
|
||||
end
|
||||
|
||||
opts.on_tail("--passwd password", "MySQL password. Leave unset to be "<<
|
||||
"prompted for it", String) do |o|
|
||||
options[:backend] = :mysql
|
||||
options[:passwd] = o
|
||||
end
|
||||
|
||||
opts.on_tail("--dbname name", "MySQL DB name for OpenNebula", String) do |o|
|
||||
options[:backend] = :mysql
|
||||
options[:dbname] = o
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
################################################################################
|
||||
# Helpers
|
||||
################################################################################
|
||||
|
||||
def connection_params()
|
||||
if( @ops[:backend] == nil )
|
||||
read_onedconf()
|
||||
else
|
||||
@backend = @ops[:backend]
|
||||
if( @backend == :sqlite )
|
||||
@sqlite_file = @ops[:sqlite]
|
||||
|
||||
else
|
||||
@server = @ops[:server]
|
||||
@port = @ops[:port]
|
||||
@user = @ops[:user]
|
||||
@passwd = @ops[:passwd]
|
||||
@db_name = @ops[:dbname]
|
||||
|
||||
# Check for errors:
|
||||
error = false
|
||||
missing = ""
|
||||
|
||||
(error = true; missing = "--user" ) if @user == nil
|
||||
(error = true; missing = "--dbname") if @db_name == nil
|
||||
|
||||
if error
|
||||
puts "MySQL option #{missing} is needed"
|
||||
exit -1
|
||||
end
|
||||
|
||||
# Check for defaults:
|
||||
@server = "localhost" if @server == nil
|
||||
@port = 0 if @port == nil
|
||||
|
||||
if @passwd == nil
|
||||
# Hide input characters
|
||||
`stty -echo`
|
||||
print "MySQL Password: "
|
||||
@passwd = STDIN.gets.strip
|
||||
`stty echo`
|
||||
puts ""
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def read_onedconf()
|
||||
|
||||
config = Configuration.new("#{ETC_LOCATION}/oned.conf")
|
||||
|
||||
if config[:db] == nil
|
||||
puts "No DB defined."
|
||||
exit -1
|
||||
end
|
||||
|
||||
if config[:db]["BACKEND"].upcase.include? "SQLITE"
|
||||
@backend = :sqlite
|
||||
@sqlite_file = "#{VAR_LOCATION}/one.db"
|
||||
|
||||
elsif config[:db]["BACKEND"].upcase.include? "MYSQL"
|
||||
@backend = :mysql
|
||||
|
||||
@server = config[:db]["SERVER"]
|
||||
@port = config[:db]["PORT"]
|
||||
@user = config[:db]["USER"]
|
||||
@passwd = config[:db]["PASSWD"]
|
||||
@db_name = config[:db]["DB_NAME"]
|
||||
|
||||
# In OpenNebula 2.0 PORT wasn't present in oned.conf, set default
|
||||
@port = "0" if @port == nil
|
||||
|
||||
# Check for errors:
|
||||
error = false
|
||||
missing = ""
|
||||
|
||||
(error = true; missing = "SERVER" ) if @server == nil
|
||||
(error = true; missing = "USER" ) if @user == nil
|
||||
(error = true; missing = "PASSWD" ) if @passwd == nil
|
||||
(error = true; missing = "DB_NAME") if @db_name == nil
|
||||
|
||||
if error
|
||||
puts "MySQL attribute #{missing} not found in " +
|
||||
"#{ETC_LOCATION}/oned.conf"
|
||||
|
||||
exit -1
|
||||
end
|
||||
|
||||
# Clean leading and trailing quotes, if any
|
||||
@server = @server [1..-2] if @server [0] == ?"
|
||||
@port = @port [1..-2] if @port [0] == ?"
|
||||
@user = @user [1..-2] if @user [0] == ?"
|
||||
@passwd = @passwd [1..-2] if @passwd [0] == ?"
|
||||
@db_name = @db_name[1..-2] if @db_name[0] == ?"
|
||||
|
||||
else
|
||||
puts "Could not load DB configuration from #{ETC_LOCATION}/oned.conf"
|
||||
exit -1
|
||||
end
|
||||
end
|
||||
|
||||
def get_bck_file()
|
||||
bck_file = ""
|
||||
|
||||
if( @ops[:backup] != nil )
|
||||
bck_file = @ops[:backup]
|
||||
elsif @backend == :sqlite
|
||||
bck_file = "#{VAR_LOCATION}/one.db.bck"
|
||||
elsif @backend == :mysql
|
||||
bck_file = "#{VAR_LOCATION}/mysql_#{@server}_#{@db_name}.sql"
|
||||
end
|
||||
|
||||
return bck_file
|
||||
end
|
||||
|
||||
def backup_db()
|
||||
bck_file = get_bck_file()
|
||||
|
||||
if( !@ops[:force] && File.exists?(bck_file) )
|
||||
puts "File #{bck_file} exists, backup aborted. Use -f to overwrite."
|
||||
exit -1
|
||||
end
|
||||
|
||||
case @backend
|
||||
when :sqlite
|
||||
if( ! File.exists?(@sqlite_file) )
|
||||
puts "File #{@sqlite_file} doesn't exist, backup aborted."
|
||||
exit -1
|
||||
end
|
||||
|
||||
FileUtils.cp(@sqlite_file, "#{bck_file}")
|
||||
puts "Sqlite database backup stored in #{bck_file}"
|
||||
puts "Use 'onedb restore' or copy the file back to restore the DB."
|
||||
|
||||
when :mysql
|
||||
cmd = "mysqldump -u #{@user} -p#{@passwd} -h #{@server} " +
|
||||
"-P #{@port} #{@db_name} > #{bck_file}"
|
||||
|
||||
rc = system(cmd)
|
||||
|
||||
if( !rc )
|
||||
puts "Unknown error running '#{cmd}'"
|
||||
exit -1
|
||||
end
|
||||
|
||||
puts "MySQL dump stored in #{bck_file}"
|
||||
puts "Use 'onedb restore' or restore the DB using the mysql command:"
|
||||
puts "mysql -u user -h server -P port db_name < backup_file"
|
||||
|
||||
else
|
||||
puts "Unknown DB #{@backend}"
|
||||
exit -1
|
||||
end
|
||||
|
||||
puts ""
|
||||
end
|
||||
|
||||
def connect_db()
|
||||
case @backend
|
||||
when :sqlite
|
||||
if( ! File.exists?(@sqlite_file) )
|
||||
puts "File #{@sqlite_file} doesn't exist."
|
||||
exit -1
|
||||
end
|
||||
@db = Sequel.sqlite(@sqlite_file)
|
||||
|
||||
when :mysql
|
||||
@db = Sequel.connect(
|
||||
"mysql://#{@user}:#{@passwd}@#{@server}:#{@port}/#{@db_name}")
|
||||
|
||||
else
|
||||
puts "Unknown DB #{@backend}"
|
||||
exit -1
|
||||
end
|
||||
end
|
||||
|
||||
def read_db_version()
|
||||
version = 0
|
||||
timestamp = 0
|
||||
comment = ""
|
||||
|
||||
@db.fetch("SELECT version, timestamp, comment FROM db_versioning " +
|
||||
"WHERE oid=(SELECT MAX(oid) FROM db_versioning)") do |row|
|
||||
version = row[:version]
|
||||
timestamp = row[:timestamp]
|
||||
comment = row[:comment]
|
||||
end
|
||||
|
||||
return [version.to_i, timestamp, comment]
|
||||
|
||||
rescue
|
||||
# If the DB doesn't have db_versioning table, it means it is empty or a 2.x
|
||||
# OpenNebula DB
|
||||
begin
|
||||
# User with ID 0 (oneadmin) always exists
|
||||
@db.fetch("SELECT * FROM user_pool WHERE oid=0") do |row|
|
||||
end
|
||||
rescue
|
||||
puts "Database schema does not look to be created by OpenNebula:"
|
||||
puts "table user_pool is missing or empty."
|
||||
|
||||
exit -1
|
||||
end
|
||||
|
||||
begin
|
||||
# Table image_pool is present only in 2.X DBs
|
||||
@db.fetch("SELECT * FROM image_pool") do |row|
|
||||
end
|
||||
rescue
|
||||
puts "Database schema looks to be created by OpenNebula 1.X."
|
||||
puts "This tool only works with databases created by 2.X versions."
|
||||
|
||||
exit -1
|
||||
end
|
||||
|
||||
comment = "Could not read any previous db_versioning data, assuming it is "+
|
||||
"an OpenNebula 2.0 or 2.2 DB."
|
||||
|
||||
return [0, 0, comment]
|
||||
end
|
||||
|
||||
def one_not_running()
|
||||
if File.exists?(LOCK_FILE)
|
||||
puts "First stop OpenNebula. Lock file found: #{LOCK_FILE}"
|
||||
exit -1
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
################################################################################
|
||||
################################################################################
|
||||
|
||||
|
||||
onedb_opts = OneDBParse.new([])
|
||||
onedb_opts.parse(ARGV)
|
||||
@ops = onedb_opts.options
|
||||
|
||||
@verbose = @ops[:verbose]
|
||||
|
||||
command = ARGV.shift
|
||||
|
||||
case command
|
||||
when "upgrade"
|
||||
# Check opennebula is not running
|
||||
one_not_running()
|
||||
|
||||
# Get DB connection parameters, from oned.conf or command arguments
|
||||
connection_params()
|
||||
|
||||
# Connect to DB
|
||||
connect_db()
|
||||
|
||||
# Read DB's version
|
||||
version, timestamp, comment = read_db_version()
|
||||
|
||||
if( @verbose )
|
||||
puts "Version read:"
|
||||
puts "#{version} : #{comment}"
|
||||
puts ""
|
||||
end
|
||||
|
||||
# Upgrade, using the scripts in $LIB_LOCATION/onedb/xx.rb
|
||||
|
||||
max_version = nil
|
||||
if( ARGV[0] )
|
||||
max_version = ARGV[0].to_i
|
||||
end
|
||||
|
||||
migrator_version = version + 1
|
||||
migrator = nil
|
||||
file = "#{LIB_LOCATION}/onedb/#{migrator_version}.rb"
|
||||
|
||||
if( File.exists?(file) &&
|
||||
(max_version == nil || migrator_version <= max_version) )
|
||||
|
||||
# At least one upgrade will be executed, make DB backup
|
||||
backup_db()
|
||||
end
|
||||
|
||||
while( File.exists?(file) &&
|
||||
(max_version == nil || migrator_version <= max_version) )
|
||||
|
||||
puts " > Running migrator #{file}" if @verbose
|
||||
|
||||
load(file)
|
||||
migrator = Migrator.new(@db, @verbose)
|
||||
result = migrator.up
|
||||
|
||||
if( !result )
|
||||
puts "Error while upgrading from #{migrator_version-1} to #{migrator.db_version}"
|
||||
exit -1
|
||||
end
|
||||
|
||||
puts " > Done" if @verbose
|
||||
puts "" if @verbose
|
||||
|
||||
migrator_version += 1
|
||||
file = "#{LIB_LOCATION}/onedb/#{migrator_version}.rb"
|
||||
end
|
||||
|
||||
# Modify db_versioning table
|
||||
if( migrator != nil )
|
||||
comment = "Database migrated from #{version} to #{migrator.db_version}"+
|
||||
" (#{migrator.one_version}) by onedb command."
|
||||
|
||||
max_oid = nil
|
||||
@db.fetch("SELECT MAX(oid) FROM db_versioning") do |row|
|
||||
max_oid = row[:"MAX(oid)"].to_i
|
||||
end
|
||||
|
||||
max_oid = 0 if max_oid == nil
|
||||
|
||||
@db.run "INSERT INTO db_versioning (oid, version, timestamp, comment) "+
|
||||
"VALUES (" +
|
||||
"#{max_oid+1}, " +
|
||||
"'#{migrator.db_version}', " +
|
||||
"#{Time.new.to_i}, " +
|
||||
"'#{comment}')"
|
||||
|
||||
puts comment
|
||||
else
|
||||
puts "Database already uses version #{version}"
|
||||
end
|
||||
|
||||
when "version"
|
||||
connection_params()
|
||||
connect_db()
|
||||
|
||||
version, timestamp, comment = read_db_version()
|
||||
|
||||
if(@verbose)
|
||||
puts "Version: #{version}"
|
||||
|
||||
time = version == 0 ? Time.now : Time.at(timestamp)
|
||||
# TODO: UTC or Local time?
|
||||
puts "Timestamp: #{time.getgm.strftime("%b %d, %Y %H:%M")}"
|
||||
|
||||
puts "Comment: #{comment}"
|
||||
else
|
||||
puts version
|
||||
end
|
||||
|
||||
when "history"
|
||||
connection_params()
|
||||
connect_db()
|
||||
|
||||
begin
|
||||
@db.fetch("SELECT version, timestamp, comment FROM db_versioning") do |row|
|
||||
puts "Version: #{row[:version]}"
|
||||
|
||||
time = row[:version] == 0 ? Time.now : Time.at(row[:timestamp])
|
||||
# TODO: UTC or Local time?
|
||||
puts "Timestamp: #{time.getgm.strftime("%b %d, %Y %H:%M")}"
|
||||
|
||||
puts "Comment: #{row[:comment]}"
|
||||
|
||||
puts ""
|
||||
end
|
||||
rescue Exception => e
|
||||
puts "No version records found. Error message:"
|
||||
puts e.message
|
||||
end
|
||||
|
||||
when "backup"
|
||||
if( ARGV[0] != nil )
|
||||
@ops[:backup] = ARGV[0]
|
||||
end
|
||||
|
||||
connection_params()
|
||||
backup_db()
|
||||
|
||||
when "restore"
|
||||
if( ARGV[0] != nil )
|
||||
@ops[:backup] = ARGV[0]
|
||||
end
|
||||
|
||||
connection_params()
|
||||
|
||||
# Source sql dump file
|
||||
bck_file = get_bck_file()
|
||||
|
||||
if( ! File.exists?(bck_file) )
|
||||
puts "File #{bck_file} doesn't exist, backup restoration aborted."
|
||||
exit -1
|
||||
end
|
||||
|
||||
one_not_running()
|
||||
|
||||
case @backend
|
||||
when :sqlite
|
||||
if( !@ops[:force] && File.exists?(@sqlite_file) )
|
||||
puts "File #{@sqlite_file} exists, use -f to overwrite."
|
||||
exit -1
|
||||
end
|
||||
|
||||
FileUtils.cp(bck_file, @sqlite_file)
|
||||
puts "Sqlite database backup restored in #{@sqlite_file}"
|
||||
|
||||
when :mysql
|
||||
|
||||
connect_db()
|
||||
|
||||
# Check if target database exists
|
||||
exists = false
|
||||
begin
|
||||
# User with ID 0 (oneadmin) always exists
|
||||
@db.fetch("SELECT * FROM user_pool WHERE oid=0") do |row|
|
||||
end
|
||||
exists = true
|
||||
rescue
|
||||
end
|
||||
|
||||
if( !@ops[:force] && exists )
|
||||
puts "MySQL database #{@db_name} at #{@server} exists, use -f to overwrite."
|
||||
exit -1
|
||||
end
|
||||
|
||||
mysql_cmd = "mysql -u #{@user} -p#{@passwd} -h #{@server} " +
|
||||
"-P #{@port} "
|
||||
|
||||
rc = system( mysql_cmd + "-e 'DROP DATABASE IF EXISTS #{@db_name};'")
|
||||
if( !rc )
|
||||
puts "Error dropping MySQL DB #{@db_name} at #{@server}."
|
||||
exit -1
|
||||
end
|
||||
|
||||
rc = system( mysql_cmd + "-e 'CREATE DATABASE IF NOT EXISTS #{@db_name};'")
|
||||
if( !rc )
|
||||
puts "Error creating MySQL DB #{@db_name} at #{@server}."
|
||||
exit -1
|
||||
end
|
||||
|
||||
rc = system( mysql_cmd + "#{@db_name} < #{bck_file}")
|
||||
if( !rc )
|
||||
puts "Error while restoring MySQL DB #{@db_name} at #{@server}."
|
||||
exit -1
|
||||
end
|
||||
|
||||
puts "MySQL DB #{@db_name} at #{@server} restored."
|
||||
|
||||
else
|
||||
puts "Unknown DB #{@backend}"
|
||||
exit -1
|
||||
end
|
||||
|
||||
else
|
||||
onedb_opts.print_help
|
||||
|
||||
exit -1
|
||||
end
|
||||
|
||||
|
||||
exit 0
|
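The upgrade loop above loads $LIB_LOCATION/onedb/<version>.rb files one after another and only expects each of them to define a Migrator class answering db_version, one_version and up (true on success). One way to satisfy that contract, reusing the MigratorBase defined in the script, is sketched below; version numbers and the schema change are placeholders:

# Minimal sketch of a migrator file, e.g. $LIB_LOCATION/onedb/1.rb.
# Version numbers and the schema change are placeholders.
class Migrator < MigratorBase
    def initialize(db, verbose)
        super(db, verbose)
        @db_version  = 1                   # version this migrator leaves the DB in
        @one_version = "OpenNebula x.y.z"
    end

    def up
        # Apply the schema changes for this version; returning true lets the
        # upgrade loop continue with the next migrator.
        @db.run "ALTER TABLE image_pool ADD COLUMN new_column INTEGER"
        true
    end
end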
@ -814,12 +814,6 @@ when "saveas"
|
||||
puts result.message
|
||||
exit -1
|
||||
end
|
||||
|
||||
if vm["TEMPLATE/DISK[DISK_ID=\"#{disk_id}\"]/SAVE_AS"]
|
||||
puts "Error: The disk #{disk_id} is already" <<
|
||||
" suppossed to be saved"
|
||||
exit -1
|
||||
end
|
||||
|
||||
result = vm.save_as(disk_id.to_i, image_name)
|
||||
if is_successful?(result)
|
||||
|
@ -30,7 +30,7 @@ class ImageOCCI < Image
|
||||
<DESCRIPTION><%= self['TEMPLATE/DESCRIPTION'] %></DESCRIPTION>
|
||||
<% end %>
|
||||
<% if size != nil %>
|
||||
<SIZE><%= size %></SIZE>
|
||||
<SIZE><%= size.to_i / 1024 %></SIZE>
|
||||
<% end %>
|
||||
<% if fstype != nil %>
|
||||
<FSTYPE><%= fstype %></FSTYPE>
|
||||
|
@ -62,8 +62,11 @@ class OCCIServer < CloudServer
|
||||
# [return] _Client_ client with the user credentials
|
||||
def get_client(requestenv)
|
||||
auth = Rack::Auth::Basic::Request.new(requestenv)
|
||||
|
||||
return one_client_user(auth.credentials[0], auth.credentials[1])
|
||||
if auth
|
||||
return one_client_user(auth.credentials[0], auth.credentials[1])
|
||||
else
|
||||
return nil
|
||||
end
|
||||
end
|
||||
|
||||
# Prepare the OCCI XML Response
|
||||
@ -88,8 +91,14 @@ class OCCIServer < CloudServer
|
||||
def get_computes(request)
|
||||
# --- Get User's VMs ---
|
||||
user_flag = -1
|
||||
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
vmpool = VirtualMachinePoolOCCI.new(
|
||||
get_client(request.env),
|
||||
one_client,
|
||||
user_flag)
|
||||
|
||||
# --- Prepare XML Response ---
|
||||
@ -107,8 +116,14 @@ class OCCIServer < CloudServer
|
||||
def get_networks(request)
|
||||
# --- Get User's VNETs ---
|
||||
user_flag = -1
|
||||
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
network_pool = VirtualNetworkPoolOCCI.new(
|
||||
get_client(request.env),
|
||||
one_client,
|
||||
user_flag)
|
||||
|
||||
# --- Prepare XML Response ---
|
||||
@ -125,8 +140,14 @@ class OCCIServer < CloudServer
|
||||
def get_storages(request)
|
||||
# --- Get User's Images ---
|
||||
user_flag = -1
|
||||
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
image_pool = ImagePoolOCCI.new(
|
||||
get_client(request.env),
|
||||
one_client,
|
||||
user_flag)
|
||||
|
||||
# --- Prepare XML Response ---
|
||||
@ -151,9 +172,14 @@ class OCCIServer < CloudServer
|
||||
# [return] _String_,_Integer_ COMPUTE Representation or error, status code
|
||||
def post_compute(request)
|
||||
# --- Create the new Instance ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
vm = VirtualMachineOCCI.new(
|
||||
VirtualMachine.build_xml,
|
||||
get_client(request.env),
|
||||
one_client,
|
||||
request.body.read,
|
||||
@instance_types,
|
||||
@config[:template_location])
|
||||
@ -176,9 +202,14 @@ class OCCIServer < CloudServer
|
||||
# status code
|
||||
def get_compute(request, params)
|
||||
# --- Get the VM ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
vm = VirtualMachineOCCI.new(
|
||||
VirtualMachine.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
# --- Prepare XML Response ---
|
||||
rc = vm.info
|
||||
@ -194,9 +225,14 @@ class OCCIServer < CloudServer
|
||||
# status code
|
||||
def delete_compute(request, params)
|
||||
# --- Get the VM ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
vm = VirtualMachineOCCI.new(
|
||||
VirtualMachine.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
rc = vm.info
|
||||
return rc, 404 if OpenNebula::is_error?(rc)
|
||||
@ -213,8 +249,11 @@ class OCCIServer < CloudServer
|
||||
# [return] _String_,_Integer_ Update confirmation msg or error,
|
||||
# status code
|
||||
def put_compute(request, params)
|
||||
|
||||
one_client = get_client(request.env)
|
||||
# --- Get the VM ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
vm = VirtualMachineOCCI.new(
|
||||
VirtualMachine.build_xml(params[:id]),
|
||||
@ -295,9 +334,14 @@ class OCCIServer < CloudServer
|
||||
# [return] _String_,_Integer_ Network Representation or error, status code
|
||||
def post_network(request)
|
||||
# --- Create the new Instance ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
network = VirtualNetworkOCCI.new(
|
||||
VirtualNetwork.build_xml,
|
||||
get_client(request.env),
|
||||
one_client,
|
||||
request.body,
|
||||
@config[:bridge])
|
||||
|
||||
@ -319,9 +363,14 @@ class OCCIServer < CloudServer
|
||||
# status code
|
||||
def get_network(request, params)
|
||||
# --- Get the VNET ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
network = VirtualNetworkOCCI.new(
|
||||
VirtualNetwork.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
# --- Prepare XML Response ---
|
||||
rc = network.info
|
||||
@ -336,9 +385,14 @@ class OCCIServer < CloudServer
|
||||
# status code
|
||||
def delete_network(request, params)
|
||||
# --- Get the VNET ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
network = VirtualNetworkOCCI.new(
|
||||
VirtualNetwork.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
rc = network.info
|
||||
return rc, 404 if OpenNebula::is_error?(rc)
|
||||
@ -357,10 +411,15 @@ class OCCIServer < CloudServer
|
||||
def put_network(request, params)
|
||||
xmldoc = XMLElement.build_xml(request.body, 'NETWORK')
|
||||
vnet_info = XMLElement.new(xmldoc) if xmldoc != nil
|
||||
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
vnet = VirtualNetworkOCCI.new(
|
||||
VirtualNetwork.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
rc = vnet.info
|
||||
return rc, 400 if OpenNebula.is_error?(rc)
|
||||
@ -393,6 +452,11 @@ class OCCIServer < CloudServer
|
||||
error = OpenNebula::Error.new(error_msg)
|
||||
return error, 400
|
||||
end
|
||||
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
# --- Create and Add the new Image ---
|
||||
occixml = request.params['occixml']
|
||||
@ -400,7 +464,7 @@ class OCCIServer < CloudServer
|
||||
|
||||
image = ImageOCCI.new(
|
||||
Image.build_xml,
|
||||
get_client(request.env),
|
||||
one_client,
|
||||
occixml,
|
||||
request.params['file'])
|
||||
|
||||
@ -422,9 +486,14 @@ class OCCIServer < CloudServer
|
||||
# status code
|
||||
def get_storage(request, params)
|
||||
# --- Get the Image ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
image = ImageOCCI.new(
|
||||
Image.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
rc = image.info
|
||||
return rc, 404 if OpenNebula::is_error?(rc)
|
||||
@ -439,9 +508,14 @@ class OCCIServer < CloudServer
|
||||
# status code
|
||||
def delete_storage(request, params)
|
||||
# --- Get the Image ---
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
image = ImageOCCI.new(
|
||||
Image.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
rc = image.info
|
||||
return rc, 404 if OpenNebula::is_error?(rc)
|
||||
@ -460,10 +534,15 @@ class OCCIServer < CloudServer
|
||||
def put_storage(request, params)
|
||||
xmldoc = XMLElement.build_xml(request.body, 'STORAGE')
|
||||
image_info = XMLElement.new(xmldoc) if xmldoc != nil
|
||||
|
||||
one_client = get_client(request.env)
|
||||
if !one_client
|
||||
return "No authorization data present", 401
|
||||
end
|
||||
|
||||
image = ImageOCCI.new(
|
||||
Image.build_xml(params[:id]),
|
||||
get_client(request.env))
|
||||
one_client)
|
||||
|
||||
rc = image.info
|
||||
return rc, 400 if OpenNebula.is_error?(rc)
|
||||
|
@ -75,8 +75,20 @@ string * VectorAttribute::to_xml() const
|
||||
|
||||
for (it=attribute_value.begin();it!=attribute_value.end();it++)
|
||||
{
|
||||
oss << "<" << it->first << "><![CDATA[" << it->second
|
||||
<< "]]></"<< it->first << ">";
|
||||
if ( it->first.empty() )
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if ( it->second.empty() )
|
||||
{
|
||||
oss << "<" << it->first << "/>";
|
||||
}
|
||||
else
|
||||
{
|
||||
oss << "<" << it->first << "><![CDATA[" << it->second
|
||||
<< "]]></"<< it->first << ">";
|
||||
}
|
||||
}
|
||||
|
||||
oss << "</"<< name() << ">";
|
||||
@ -133,9 +145,16 @@ void VectorAttribute::unmarshall(const string& sattr, const char * _sep)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
attribute_value.insert(make_pair(tmp.substr(0,mpos),
|
||||
tmp.substr(mpos+1)));
|
||||
|
||||
if ( mpos + 1 == tmp.size() )
|
||||
{
|
||||
attribute_value.insert(make_pair(tmp.substr(0,mpos),""));
|
||||
}
|
||||
else
|
||||
{
|
||||
attribute_value.insert(make_pair(tmp.substr(0,mpos),
|
||||
tmp.substr(mpos+1)));
|
||||
}
|
||||
}
|
||||
}
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -24,7 +24,8 @@ lib_name='nebula_common'
|
||||
source_files=[
|
||||
'ActionManager.cc',
|
||||
'Attribute.cc',
|
||||
'mem_collector.c'
|
||||
'mem_collector.c',
|
||||
'SSLTools.cc'
|
||||
]
|
||||
|
||||
# Build library
|
||||
|
98
src/common/SSLTools.cc
Normal file
@ -0,0 +1,98 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/hmac.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
|
||||
#include "SSLTools.h"
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include <iomanip>
|
||||
|
||||
|
||||
//#include <iostream>
|
||||
|
||||
|
||||
//#include <sys/types.h>
|
||||
//#include <pwd.h>
|
||||
//#include <stdlib.h>
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string * SSLTools::base64_encode(const string& in)
|
||||
{
|
||||
BIO * bio_mem;
|
||||
BIO * bio_64;
|
||||
|
||||
char * encoded_c;
|
||||
long int size;
|
||||
|
||||
bio_64 = BIO_new(BIO_f_base64());
|
||||
bio_mem = BIO_new(BIO_s_mem());
|
||||
|
||||
BIO_push(bio_64, bio_mem);
|
||||
|
||||
BIO_set_flags(bio_64,BIO_FLAGS_BASE64_NO_NL);
|
||||
|
||||
BIO_write(bio_64, in.c_str(), in.length());
|
||||
|
||||
if (BIO_flush(bio_64) != 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
size = BIO_get_mem_data(bio_mem,&encoded_c);
|
||||
|
||||
string * encoded = new string(encoded_c,size);
|
||||
|
||||
BIO_free_all(bio_64);
|
||||
|
||||
return encoded;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string SSLTools::sha1_digest(const string& in)
|
||||
{
|
||||
EVP_MD_CTX mdctx;
|
||||
unsigned char md_value[EVP_MAX_MD_SIZE];
|
||||
unsigned int md_len;
|
||||
ostringstream oss;
|
||||
|
||||
EVP_MD_CTX_init(&mdctx);
|
||||
EVP_DigestInit_ex(&mdctx, EVP_sha1(), NULL);
|
||||
|
||||
EVP_DigestUpdate(&mdctx, in.c_str(), in.length());
|
||||
|
||||
EVP_DigestFinal_ex(&mdctx,md_value,&md_len);
|
||||
EVP_MD_CTX_cleanup(&mdctx);
|
||||
|
||||
for(unsigned int i = 0; i<md_len; i++)
|
||||
{
|
||||
oss << setfill('0') << setw(2) << hex << nouppercase
|
||||
<< (unsigned short) md_value[i];
|
||||
}
|
||||
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
@ -18,6 +18,30 @@
|
||||
#include "Host.h"
|
||||
#include "Nebula.h"
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
static void parse_host_arguments(Host *host, string& parsed)
|
||||
{
|
||||
size_t found;
|
||||
|
||||
found = parsed.find("$HID");
|
||||
|
||||
if ( found !=string::npos )
|
||||
{
|
||||
ostringstream oss;
|
||||
oss << host->get_oid();
|
||||
|
||||
parsed.replace(found,4,oss.str());
|
||||
}
|
||||
|
||||
found = parsed.find("$TEMPLATE");
|
||||
|
||||
if ( found != string::npos )
|
||||
{
|
||||
string templ;
|
||||
parsed.replace(found,9,host->to_xml64(templ));
|
||||
}
|
||||
}
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
@ -26,7 +50,6 @@ void HostAllocateHook::do_hook(void *arg)
|
||||
Host * host;
|
||||
|
||||
string parsed_args = args;
|
||||
size_t found;
|
||||
|
||||
host = static_cast<Host *>(arg);
|
||||
|
||||
@ -34,16 +57,8 @@ void HostAllocateHook::do_hook(void *arg)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
found = args.find("$HID");
|
||||
|
||||
if ( found !=string::npos )
|
||||
{
|
||||
ostringstream oss;
|
||||
oss << host->get_oid();
|
||||
|
||||
parsed_args.replace(found,4,oss.str());
|
||||
}
|
||||
|
||||
parse_host_arguments(host,parsed_args);
|
||||
|
||||
Nebula& ne = Nebula::instance();
|
||||
HookManager * hm = ne.get_hm();
|
||||
@ -156,17 +171,8 @@ void HostStateHook::do_hook(void *arg)
|
||||
if ( cur_state == this->state )
|
||||
{
|
||||
string parsed_args = args;
|
||||
size_t found;
|
||||
|
||||
found = args.find("$HID");
|
||||
|
||||
if ( found !=string::npos )
|
||||
{
|
||||
ostringstream oss;
|
||||
oss << host->get_oid();
|
||||
|
||||
parsed_args.replace(found,4,oss.str());
|
||||
}
|
||||
parse_host_arguments(host,parsed_args);
|
||||
|
||||
Nebula& ne = Nebula::instance();
|
||||
HookManager * hm = ne.get_hm();
|
||||
|
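The refactored parse_host_arguments() replaces $HID with the host id and $TEMPLATE with the value of host->to_xml64() (presumably the host XML, base64-encoded, matching the 64 suffix), so both hook types now share the same substitution logic. A hook script could consume those arguments roughly as sketched here; the argument order depends on the ARGUMENTS of the hook definition in oned.conf and is an assumption:

#!/usr/bin/env ruby
# Sketch of a host hook consuming the $HID and $TEMPLATE substitutions.
# ARGV positions are an assumption; adjust to the actual hook ARGUMENTS.
require 'base64'
require 'rexml/document'

host_id  = ARGV[0]
host_xml = Base64.decode64(ARGV[1] || '')

doc  = REXML::Document.new(host_xml)
name = doc.elements['HOST/NAME'].text rescue nil

STDERR.puts "Hook triggered for host #{host_id} (#{name})"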
@ -41,7 +41,7 @@ Image::Image(int _uid,
|
||||
user_name(_user_name),
|
||||
type(OS),
|
||||
regtime(time(0)),
|
||||
source(""),
|
||||
source("-"),
|
||||
state(INIT),
|
||||
running_vms(0)
|
||||
{
|
||||
@ -78,22 +78,6 @@ const char * Image::db_bootstrap = "CREATE TABLE IF NOT EXISTS image_pool ("
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
string Image::generate_source(int uid, const string& name)
|
||||
{
|
||||
ostringstream tmp_hashstream;
|
||||
ostringstream tmp_sourcestream;
|
||||
|
||||
tmp_hashstream << uid << ":" << name;
|
||||
|
||||
tmp_sourcestream << ImagePool::source_prefix() << "/";
|
||||
tmp_sourcestream << sha1_digest(tmp_hashstream.str());
|
||||
|
||||
return tmp_sourcestream.str();
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Image::insert(SqlDB *db, string& error_str)
|
||||
{
|
||||
int rc;
|
||||
@ -103,6 +87,7 @@ int Image::insert(SqlDB *db, string& error_str)
|
||||
string public_attr;
|
||||
string persistent_attr;
|
||||
string dev_prefix;
|
||||
string source_attr;
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Check default image attributes
|
||||
@ -163,16 +148,17 @@ int Image::insert(SqlDB *db, string& error_str)
|
||||
{
|
||||
SingleAttribute * dev_att = new SingleAttribute("DEV_PREFIX",
|
||||
ImagePool::default_dev_prefix());
|
||||
|
||||
obj_template->set(dev_att);
|
||||
}
|
||||
|
||||
// ------------ PATH & SOURCE --------------------
|
||||
|
||||
get_template_attribute("PATH", path_attr);
|
||||
get_template_attribute("SOURCE", source);
|
||||
get_template_attribute("SOURCE", source_attr);
|
||||
|
||||
// The template should contain PATH or SOURCE
|
||||
if ( source.empty() && path_attr.empty() )
|
||||
if ( source_attr.empty() && path_attr.empty() )
|
||||
{
|
||||
string size_attr;
|
||||
string fstype_attr;
|
||||
@ -198,14 +184,13 @@ int Image::insert(SqlDB *db, string& error_str)
|
||||
goto error_size_format;
|
||||
}
|
||||
}
|
||||
else if ( !source.empty() && !path_attr.empty() )
|
||||
else if ( !source_attr.empty() && !path_attr.empty() )
|
||||
{
|
||||
goto error_path_and_source;
|
||||
}
|
||||
|
||||
if (source.empty())
|
||||
else if ( !source_attr.empty() )
|
||||
{
|
||||
source = Image::generate_source(uid,name);
|
||||
source = source_attr;
|
||||
}
|
||||
|
||||
state = LOCKED; //LOCKED till the ImageManager copies it to the Repository
|
||||
@ -447,31 +432,21 @@ int Image::disk_attribute( VectorAttribute * disk,
|
||||
get_template_attribute("DEV_PREFIX", prefix);
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// NEW DISK ATTRIBUTES
|
||||
// BASE DISK ATTRIBUTES
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
map<string,string> new_disk;
|
||||
disk->replace("IMAGE", name);
|
||||
disk->replace("IMAGE_ID", iid.str());
|
||||
disk->replace("SOURCE", source);
|
||||
|
||||
new_disk.insert(make_pair("IMAGE", name));
|
||||
new_disk.insert(make_pair("IMAGE_ID", iid.str()));
|
||||
new_disk.insert(make_pair("SOURCE", source));
|
||||
|
||||
if (!bus.empty())
|
||||
if (bus.empty() && !template_bus.empty()) //BUS in Image, not in DISK
|
||||
{
|
||||
new_disk.insert(make_pair("BUS",bus));
|
||||
}
|
||||
else if (!template_bus.empty())
|
||||
{
|
||||
new_disk.insert(make_pair("BUS",template_bus));
|
||||
disk->replace("BUS",template_bus);
|
||||
}
|
||||
|
||||
if (!driver.empty())
|
||||
if (driver.empty() && !template_driver.empty())//DRIVER in Image,not in DISK
|
||||
{
|
||||
new_disk.insert(make_pair("DRIVER",driver));
|
||||
}
|
||||
else if (!template_driver.empty())
|
||||
{
|
||||
new_disk.insert(make_pair("DRIVER",template_driver));
|
||||
disk->replace("DRIVER",template_driver);
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
@ -480,26 +455,26 @@ int Image::disk_attribute( VectorAttribute * disk,
|
||||
|
||||
if ( persistent_img )
|
||||
{
|
||||
new_disk.insert(make_pair("CLONE","NO"));
|
||||
new_disk.insert(make_pair("SAVE","YES"));
|
||||
disk->replace("CLONE","NO");
|
||||
disk->replace("SAVE","YES");
|
||||
}
|
||||
else
|
||||
{
|
||||
new_disk.insert(make_pair("CLONE","YES"));
|
||||
new_disk.insert(make_pair("SAVE","NO"));
|
||||
disk->replace("CLONE","YES");
|
||||
disk->replace("SAVE","NO");
|
||||
}
|
||||
|
||||
switch(type)
|
||||
{
|
||||
case OS:
|
||||
case DATABLOCK:
|
||||
new_disk.insert(make_pair("TYPE","DISK"));
|
||||
new_disk.insert(make_pair("READONLY","NO"));
|
||||
disk->replace("TYPE","DISK");
|
||||
disk->replace("READONLY","NO");
|
||||
break;
|
||||
|
||||
case CDROM:
|
||||
new_disk.insert(make_pair("TYPE","CDROM"));
|
||||
new_disk.insert(make_pair("READONLY","YES"));
|
||||
disk->replace("TYPE","CDROM");
|
||||
disk->replace("READONLY","YES");
|
||||
break;
|
||||
}
|
||||
|
||||
@ -507,67 +482,37 @@ int Image::disk_attribute( VectorAttribute * disk,
|
||||
// TARGET attribute
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
if (!target.empty())
|
||||
if (target.empty()) //No TARGET in DISK attribute
|
||||
{
|
||||
new_disk.insert(make_pair("TARGET", target));
|
||||
}
|
||||
else if (!template_target.empty())
|
||||
{
|
||||
new_disk.insert(make_pair("TARGET", template_target));
|
||||
}
|
||||
else
|
||||
{
|
||||
switch(type)
|
||||
if (!template_target.empty())
|
||||
{
|
||||
case OS:
|
||||
prefix += "a";
|
||||
break;
|
||||
|
||||
case CDROM:
|
||||
prefix += "c"; // b is for context
|
||||
break;
|
||||
|
||||
case DATABLOCK:
|
||||
prefix += static_cast<char>(('e'+ *index));
|
||||
*index = *index + 1;
|
||||
break;
|
||||
|
||||
disk->replace("TARGET", template_target);
|
||||
}
|
||||
else
|
||||
{
|
||||
switch(type)
|
||||
{
|
||||
case OS:
|
||||
prefix += "a";
|
||||
break;
|
||||
|
||||
new_disk.insert(make_pair("TARGET", prefix));
|
||||
case CDROM:
|
||||
prefix += "c"; // b is for context
|
||||
break;
|
||||
|
||||
case DATABLOCK:
|
||||
prefix += static_cast<char>(('e'+ *index));
|
||||
*index = *index + 1;
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
disk->replace("TARGET", prefix);
|
||||
}
|
||||
}
|
||||
|
||||
disk->replace(new_disk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string Image::sha1_digest(const string& pass)
|
||||
{
|
||||
EVP_MD_CTX mdctx;
|
||||
unsigned char md_value[EVP_MAX_MD_SIZE];
|
||||
unsigned int md_len;
|
||||
ostringstream oss;
|
||||
|
||||
EVP_MD_CTX_init(&mdctx);
|
||||
EVP_DigestInit_ex(&mdctx, EVP_sha1(), NULL);
|
||||
|
||||
EVP_DigestUpdate(&mdctx, pass.c_str(), pass.length());
|
||||
|
||||
EVP_DigestFinal_ex(&mdctx,md_value,&md_len);
|
||||
EVP_MD_CTX_cleanup(&mdctx);
|
||||
|
||||
for(unsigned int i = 0; i<md_len; i++)
|
||||
{
|
||||
oss << setfill('0') << setw(2) << hex << nouppercase
|
||||
<< (unsigned short) md_value[i];
|
||||
}
|
||||
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
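With source initialised to "-" and generate_source() gone, Image::insert() above only validates the template: it must carry PATH, or SOURCE, or, for DATABLOCK images, SIZE plus FSTYPE; the real location is filled in later by the repository driver. Registering such an image through the Ruby OCA bindings would look roughly like this sketch (credentials, endpoint and paths are placeholders):

# Sketch: registering an image whose template satisfies the checks above.
# Credentials and paths are placeholders.
require 'OpenNebula'
include OpenNebula

client = Client.new("oneadmin:opennebula")

template = <<-EOT
    NAME = "ubuntu_server"
    PATH = "/tmp/ubuntu.img"   # alternatively SOURCE, or SIZE + FSTYPE for a DATABLOCK
EOT

image = Image.new(Image.build_xml, client)
rc    = image.allocate(template)

puts rc.message if OpenNebula.is_error?(rc)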
@ -122,7 +122,7 @@ void ImageManager::move_image(Image *img, const string& source)
|
||||
}
|
||||
|
||||
oss << "Moving disk " << source << " to repository image "
|
||||
<< img->get_oid() << " as " << img->get_source();
|
||||
<< img->get_oid();
|
||||
|
||||
imd->mv(img->get_oid(),source,img->get_source());
|
||||
|
||||
@ -400,7 +400,7 @@ int ImageManager::register_image(int iid)
|
||||
if ( img->get_type() == Image::DATABLOCK &&
|
||||
!size.empty() && !fs.empty() )
|
||||
{
|
||||
imd->mkfs(img->get_oid(), img->get_source(), fs, size);
|
||||
imd->mkfs(img->get_oid(), fs, size);
|
||||
|
||||
oss << "Creating disk at " << img->get_source() << " of "
|
||||
<< size << "Mb with format " << fs;
|
||||
@ -416,9 +416,8 @@ int ImageManager::register_image(int iid)
|
||||
}
|
||||
else //PATH -> COPY TO REPOSITORY AS SOURCE
|
||||
{
|
||||
imd->cp(img->get_oid(), path, img->get_source());
|
||||
oss << "Copying image " << path
|
||||
<< " to repository as " << img->get_source();
|
||||
imd->cp(img->get_oid(), path);
|
||||
oss << "Copying " << path <<" to repository for image "<<img->get_oid();
|
||||
}
|
||||
|
||||
NebulaLog::log("ImM",Log::INFO,oss);
|
||||
|
@ -27,12 +27,11 @@
|
||||
/* ************************************************************************** */
|
||||
|
||||
void ImageManagerDriver::cp(int oid,
|
||||
const string& source,
|
||||
const string& destination) const
|
||||
const string& source) const
|
||||
{
|
||||
ostringstream os;
|
||||
|
||||
os << "CP " << oid << " " << source << " " << destination << endl;
|
||||
os << "CP " << oid << " " << source << endl;
|
||||
|
||||
write(os);
|
||||
}
|
||||
@ -53,14 +52,12 @@ void ImageManagerDriver::mv(int oid,
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void ImageManagerDriver::mkfs(int oid,
|
||||
const string& destination,
|
||||
const string& fs,
|
||||
const string& size_mb) const
|
||||
{
|
||||
ostringstream os;
|
||||
|
||||
os << "MKFS " << oid << " " << destination << " " <<
|
||||
fs << " " << size_mb << endl;
|
||||
os << "MKFS " << oid << " " << fs << " " << size_mb << endl;
|
||||
write(os);
|
||||
}
|
||||
|
||||
@ -146,7 +143,21 @@ void ImageManagerDriver::protocol(
|
||||
{
|
||||
if ( result == "SUCCESS" )
|
||||
{
|
||||
string source;
|
||||
|
||||
if ( is.good() )
|
||||
{
|
||||
is >> source >> ws;
|
||||
}
|
||||
|
||||
if ( is.fail() )
|
||||
{
|
||||
goto error_cp;
|
||||
}
|
||||
|
||||
image->set_source(source);
|
||||
image->set_state(Image::READY);
|
||||
|
||||
ipool->update(image);
|
||||
|
||||
NebulaLog::log("ImM", Log::INFO, "Image copied and ready to use.");
|
||||
@ -160,6 +171,23 @@ void ImageManagerDriver::protocol(
|
||||
{
|
||||
if ( result == "SUCCESS" )
|
||||
{
|
||||
if (image->get_source() == "-")
|
||||
{
|
||||
string source;
|
||||
|
||||
if ( is.good() )
|
||||
{
|
||||
is >> source >> ws;
|
||||
}
|
||||
|
||||
if ( is.fail() )
|
||||
{
|
||||
goto error_mv;
|
||||
}
|
||||
|
||||
image->set_source(source);
|
||||
}
|
||||
|
||||
image->set_state(Image::READY);
|
||||
ipool->update(image);
|
||||
|
||||
@ -174,7 +202,21 @@ void ImageManagerDriver::protocol(
|
||||
{
|
||||
if ( result == "SUCCESS" )
|
||||
{
|
||||
string source;
|
||||
|
||||
if ( is.good() )
|
||||
{
|
||||
is >> source >> ws;
|
||||
}
|
||||
|
||||
if ( is.fail() )
|
||||
{
|
||||
goto error_mkfs;
|
||||
}
|
||||
|
||||
image->set_source(source);
|
||||
image->set_state(Image::READY);
|
||||
|
||||
ipool->update(image);
|
||||
|
||||
NebulaLog::log("ImM", Log::INFO, "Image created and ready to use");
|
||||
|
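After these changes the core no longer decides where an image will live: CP and MKFS carry just the image id plus the source path or fs/size, and on SUCCESS the driver is expected to answer with the path it chose, which the protocol handler above stores through set_source(). On the driver side that exchange could look like this rough sketch; the reply is assumed to follow the "ACTION RESULT IMAGE_ID INFO" line format the parser reads, and the repository location is purely illustrative:

# Driver-side sketch of the new image repository protocol.
# REPO_DIR is a made-up location; the reply format is an assumption.
REPO_DIR = "/var/lib/one/images"

STDIN.each_line do |line|
    action, image_id, *args = line.split

    case action
    when "CP"
        src = args[0]
        dst = File.join(REPO_DIR, image_id)

        ok  = system("cp", src, dst)

        # On SUCCESS the core reads this path and sets it as the image SOURCE
        puts "CP #{ok ? 'SUCCESS' : 'FAILURE'} #{image_id} #{dst}"
        STDOUT.flush
    end
end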
@ -24,7 +24,6 @@
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
string ImagePool::_source_prefix;
|
||||
string ImagePool::_default_type;
|
||||
string ImagePool::_default_dev_prefix;
|
||||
|
||||
@ -32,7 +31,6 @@ string ImagePool::_default_dev_prefix;
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
ImagePool::ImagePool(SqlDB * db,
|
||||
const string& __source_prefix,
|
||||
const string& __default_type,
|
||||
const string& __default_dev_prefix):
|
||||
PoolSQL(db,Image::table)
|
||||
@ -40,7 +38,6 @@ ImagePool::ImagePool(SqlDB * db,
|
||||
ostringstream sql;
|
||||
|
||||
// Init static defaults
|
||||
_source_prefix = __source_prefix;
|
||||
_default_type = __default_type;
|
||||
_default_dev_prefix = __default_dev_prefix;
|
||||
|
||||
|
@ -50,19 +50,19 @@ const string templates[] =
|
||||
|
||||
const string xmls[] =
|
||||
{
|
||||
"<IMAGE><ID>0</ID><UID>0</UID><USERNAME>A user</USERNAME><NAME>Image one</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>1</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/9ab4a4e021ee2883f57e3aeecc9e2aed7c3fa198</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a very long description of an image, and to achieve the longness I will copy this over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over.This is a very long description of an image, and to achieve the longness I will copy this over.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Image one]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH></TEMPLATE></IMAGE>",
|
||||
"<IMAGE><ID>0</ID><UID>0</UID><USERNAME>A user</USERNAME><NAME>Image one</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>1</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a very long description of an image, and to achieve the longness I will copy this over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over.This is a very long description of an image, and to achieve the longness I will copy this over.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Image one]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH></TEMPLATE></IMAGE>",
|
||||
|
||||
"<IMAGE><ID>1</ID><UID>1</UID><USERNAME>B user</USERNAME><NAME>Second Image</NAME><TYPE>0</TYPE><PUBLIC>1</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/c9d51800847467911c755e5e4c13dfe28c3a79f3</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a rather short description.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Second Image]]></NAME><PATH><![CDATA[/tmp/image_second_test]]></PATH></TEMPLATE></IMAGE>",
|
||||
"<IMAGE><ID>1</ID><UID>1</UID><USERNAME>B user</USERNAME><NAME>Second Image</NAME><TYPE>0</TYPE><PUBLIC>1</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a rather short description.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Second Image]]></NAME><PATH><![CDATA[/tmp/image_second_test]]></PATH></TEMPLATE></IMAGE>",
|
||||
|
||||
"<IMAGE><ID>0</ID><UID>2</UID><USERNAME>C user</USERNAME><NAME>The third image</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/e50b0c738be9d431475bf5859629e5580301a7d6</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><BUS><![CDATA[SCSI]]></BUS><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[The third image]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH><PROFILE><![CDATA[STUDENT]]></PROFILE></TEMPLATE></IMAGE>"
|
||||
"<IMAGE><ID>0</ID><UID>2</UID><USERNAME>C user</USERNAME><NAME>The third image</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><BUS><![CDATA[SCSI]]></BUS><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[The third image]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH><PROFILE><![CDATA[STUDENT]]></PROFILE></TEMPLATE></IMAGE>"
|
||||
};
|
||||
|
||||
|
||||
// This xml dump result has the STIMEs modified to 0000000000
|
||||
const string xml_dump =
|
||||
"<IMAGE_POOL><IMAGE><ID>0</ID><UID>0</UID><USERNAME>A user</USERNAME><NAME>Image one</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>1</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/9ab4a4e021ee2883f57e3aeecc9e2aed7c3fa198</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a very long description of an image, and to achieve the longness I will copy this over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over.This is a very long description of an image, and to achieve the longness I will copy this over.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Image one]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH></TEMPLATE></IMAGE><IMAGE><ID>1</ID><UID>1</UID><USERNAME>B user</USERNAME><NAME>Second Image</NAME><TYPE>0</TYPE><PUBLIC>1</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/c9d51800847467911c755e5e4c13dfe28c3a79f3</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a rather short description.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Second Image]]></NAME><PATH><![CDATA[/tmp/image_second_test]]></PATH></TEMPLATE></IMAGE><IMAGE><ID>2</ID><UID>2</UID><USERNAME>C user</USERNAME><NAME>The third image</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/e50b0c738be9d431475bf5859629e5580301a7d6</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><BUS><![CDATA[SCSI]]></BUS><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[The third image]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH><PROFILE><![CDATA[STUDENT]]></PROFILE></TEMPLATE></IMAGE></IMAGE_POOL>";
|
||||
"<IMAGE_POOL><IMAGE><ID>0</ID><UID>0</UID><USERNAME>A user</USERNAME><NAME>Image one</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>1</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a very long description of an image, and to achieve the longness I will copy this over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over.This is a very long description of an image, and to achieve the longness I will copy this over.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Image one]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH></TEMPLATE></IMAGE><IMAGE><ID>1</ID><UID>1</UID><USERNAME>B user</USERNAME><NAME>Second Image</NAME><TYPE>0</TYPE><PUBLIC>1</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a rather short description.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Second Image]]></NAME><PATH><![CDATA[/tmp/image_second_test]]></PATH></TEMPLATE></IMAGE><IMAGE><ID>2</ID><UID>2</UID><USERNAME>C user</USERNAME><NAME>The third image</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><BUS><![CDATA[SCSI]]></BUS><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[The third image]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH><PROFILE><![CDATA[STUDENT]]></PROFILE></TEMPLATE></IMAGE></IMAGE_POOL>";
|
||||
const string xml_dump_where =
|
||||
"<IMAGE_POOL><IMAGE><ID>0</ID><UID>0</UID><USERNAME>A user</USERNAME><NAME>Image one</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>1</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/9ab4a4e021ee2883f57e3aeecc9e2aed7c3fa198</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a very long description of an image, and to achieve the longness I will copy this over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over.This is a very long description of an image, and to achieve the longness I will copy this over.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Image one]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH></TEMPLATE></IMAGE><IMAGE><ID>1</ID><UID>1</UID><USERNAME>B user</USERNAME><NAME>Second Image</NAME><TYPE>0</TYPE><PUBLIC>1</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>source_prefix/c9d51800847467911c755e5e4c13dfe28c3a79f3</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a rather short description.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Second Image]]></NAME><PATH><![CDATA[/tmp/image_second_test]]></PATH></TEMPLATE></IMAGE></IMAGE_POOL>";
|
||||
"<IMAGE_POOL><IMAGE><ID>0</ID><UID>0</UID><USERNAME>A user</USERNAME><NAME>Image one</NAME><TYPE>0</TYPE><PUBLIC>0</PUBLIC><PERSISTENT>1</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a very long description of an image, and to achieve the longness I will copy this over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over. This is a very long description of an image, and to achieve the longness I will copy this over. And over.This is a very long description of an image, and to achieve the longness I will copy this over.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Image one]]></NAME><PATH><![CDATA[/tmp/image_test]]></PATH></TEMPLATE></IMAGE><IMAGE><ID>1</ID><UID>1</UID><USERNAME>B user</USERNAME><NAME>Second Image</NAME><TYPE>0</TYPE><PUBLIC>1</PUBLIC><PERSISTENT>0</PERSISTENT><REGTIME>0000000000</REGTIME><SOURCE>-</SOURCE><STATE>4</STATE><RUNNING_VMS>0</RUNNING_VMS><TEMPLATE><DESCRIPTION><![CDATA[This is a rather short description.]]></DESCRIPTION><DEV_PREFIX><![CDATA[hd]]></DEV_PREFIX><NAME><![CDATA[Second Image]]></NAME><PATH><![CDATA[/tmp/image_second_test]]></PATH></TEMPLATE></IMAGE></IMAGE_POOL>";
|
||||
|
||||
const string replacement = "0000000000";
|
||||
|
||||
@ -70,12 +70,10 @@ class ImagePoolFriend : public ImagePool
|
||||
{
|
||||
public:
|
||||
ImagePoolFriend(SqlDB * db,
|
||||
const string& _source_prefix,
|
||||
const string& _default_type,
|
||||
const string& _default_dev_prefix):
|
||||
|
||||
ImagePool( db,
|
||||
_source_prefix,
|
||||
_default_type,
|
||||
_default_dev_prefix){};
|
||||
|
||||
@ -147,7 +145,7 @@ protected:
|
||||
PoolSQL* create_pool(SqlDB* db)
|
||||
{
|
||||
ImagePoolFriend * imp =
|
||||
new ImagePoolFriend(db, "source_prefix", "OS", "hd");
|
||||
new ImagePoolFriend(db, "OS", "hd");
|
||||
return imp;
|
||||
};
|
||||
|
||||
@ -201,7 +199,7 @@ public:
|
||||
|
||||
// Create a new pool, using the same DB. This new pool should read the
|
||||
// allocated images.
|
||||
imp = new ImagePool(db, "source_prefix", "OS", "hd");
|
||||
imp = new ImagePool(db,"OS", "hd");
|
||||
|
||||
img = imp->get(names[0], uids[0], false);
|
||||
CPPUNIT_ASSERT( img != 0 );
|
||||
@ -565,8 +563,7 @@ public:
|
||||
|
||||
value = "";
|
||||
value = disk->vector_value("SOURCE");
|
||||
CPPUNIT_ASSERT( value ==
|
||||
"source_prefix/9ab4a4e021ee2883f57e3aeecc9e2aed7c3fa198" );
|
||||
CPPUNIT_ASSERT( value == "-" );
|
||||
|
||||
// clean up
|
||||
//img->release_image();
|
||||
@ -586,8 +583,7 @@ public:
|
||||
|
||||
value = "";
|
||||
value = disk->vector_value("SOURCE");
|
||||
CPPUNIT_ASSERT( value ==
|
||||
"source_prefix/9ab4a4e021ee2883f57e3aeecc9e2aed7c3fa198" );
|
||||
CPPUNIT_ASSERT( value == "-" );
|
||||
|
||||
// clean up
|
||||
delete disk;
|
||||
@ -874,15 +870,15 @@ public:
|
||||
string result = oss.str();
|
||||
|
||||
result.replace(157, 10, replacement);
|
||||
result.replace(1129, 10, replacement);
|
||||
result.replace(1641, 10, replacement);
|
||||
result.replace(1076, 10, replacement);
|
||||
result.replace(1535, 10, replacement);
|
||||
|
||||
/*
|
||||
if( result != xml_dump )
|
||||
{
|
||||
cout << endl << result << endl << xml_dump << endl;
|
||||
}
|
||||
//*/
|
||||
*/
|
||||
|
||||
CPPUNIT_ASSERT( result == xml_dump );
|
||||
}
|
||||
@ -909,14 +905,14 @@ public:
|
||||
|
||||
string result = oss.str();
|
||||
result.replace(157, 10, replacement);
|
||||
result.replace(1129, 10, replacement);
|
||||
result.replace(1076, 10, replacement);
|
||||
|
||||
|
||||
/*
|
||||
if( result != xml_dump_where )
|
||||
{
|
||||
cout << endl << result << endl << xml_dump_where << endl;
|
||||
}
|
||||
//*/
|
||||
|
||||
|
||||
CPPUNIT_ASSERT( result == xml_dump_where );
|
||||
}
|
||||
|
@ -69,19 +69,19 @@ class ImageDriver < OpenNebulaDriver
|
||||
# Image Manager Protocol Actions (generic implementation
|
||||
# -------------------------------------------------------------------------
|
||||
def mv(id, src, dst)
|
||||
local_action("#{@actions_path}/mv #{src} #{dst}",id,:mv)
|
||||
local_action("#{@actions_path}/mv #{src} #{dst} #{id}",id,ACTION[:mv])
|
||||
end
|
||||
|
||||
def cp(id, src, dst)
|
||||
local_action("#{@actions_path}/cp #{src} #{dst}",id,:cp)
|
||||
def cp(id, src)
|
||||
local_action("#{@actions_path}/cp #{src} #{id}",id,ACTION[:cp])
|
||||
end
|
||||
|
||||
def rm(id, dst)
|
||||
local_action("#{@actions_path}/rm #{dst}",id,:rm)
|
||||
local_action("#{@actions_path}/rm #{dst} #{id}",id,ACTION[:rm])
|
||||
end
|
||||
|
||||
def mkfs(id, dst, fs, size)
|
||||
local_action("#{@actions_path}/mkfs #{dst} #{fs} #{size}",id,:mkfs)
|
||||
def mkfs(id, fs, size)
|
||||
local_action("#{@actions_path}/mkfs #{fs} #{size} #{id}",id,ACTION[:mkfs])
|
||||
end
|
||||
end
|
||||
|
||||
|
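Note how the refactored actions append the image ID to the end of every command line and drop the destination argument from cp and mkfs, leaving the destination up to the script. A stand-alone Ruby sketch of the argument order these calls now produce; the actions path, ID and source path are example values, not taken from the commit.

# Illustration of the command lines the new cp/mkfs actions build;
# actions_path, the ID and the source path are example values only.
actions_path = "/var/lib/one/remotes/image/fs"
id  = 42
src = "/tmp/image_test"

cp_cmd   = "#{actions_path}/cp #{src} #{id}"       # no destination argument
mkfs_cmd = "#{actions_path}/mkfs ext3 1024 #{id}"  # FSTYPE SIZE_MB ID

puts cp_cmd    # => /var/lib/one/remotes/image/fs/cp /tmp/image_test 42
puts mkfs_cmd  # => /var/lib/one/remotes/image/fs/mkfs ext3 1024 42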
@ -21,7 +21,7 @@
|
||||
# Several SRC types are supported
|
||||
###############################################################################
|
||||
|
||||
# ------------ Set up the environment to source common tools ------------
|
||||
# -------- Set up the environment to source common tools & conf ------------
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
LIB_LOCATION=/usr/lib/one
|
||||
@ -30,11 +30,13 @@ else
|
||||
fi
|
||||
|
||||
. $LIB_LOCATION/sh/scripts_common.sh
|
||||
|
||||
# ------------ Copy the image to the repository ------------
|
||||
source $(dirname $0)/fsrc
|
||||
|
||||
SRC=$1
|
||||
DST=$2
|
||||
ID=$2
|
||||
DST=`generate_image_path`
|
||||
|
||||
# ------------ Copy the image to the repository -------------
|
||||
|
||||
case $SRC in
|
||||
http://*)
|
||||
@ -51,3 +53,5 @@ http://*)
|
||||
esac
|
||||
|
||||
exec_and_log "chmod 0660 $DST"
|
||||
|
||||
echo "$DST"
|
||||
|
37
src/image_mad/remotes/fs/fsrc
Normal file

@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Configuration File for File-System based Image Repositories
|
||||
#------------------------------------------------------------------------------
|
||||
export IMAGE_REPOSITORY_PATH=$ONE_LOCATION/var/images
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Function used to generate Image names, you should not need to override this
|
||||
#------------------------------------------------------------------------------
|
||||
function generate_image_path {
|
||||
|
||||
CANONICAL_STR="`$DATE +%s`:$ID"
|
||||
|
||||
CANONICAL_MD5=$($MD5SUM - << EOF
|
||||
$CANONICAL_STR
|
||||
EOF
|
||||
)
|
||||
|
||||
echo "$IMAGE_REPOSITORY_PATH/`echo $CANONICAL_MD5 | cut -d ' ' -f1`"
|
||||
}
|
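generate_image_path above hashes an "epoch:ID" string with MD5 and places the result under IMAGE_REPOSITORY_PATH, so repeated registrations of the same image ID still get unique names. A rough Ruby rendering of the same scheme, only to make the behaviour concrete; the default repository path is an assumption and the heredoc's trailing-newline detail is ignored.

require 'digest/md5'

# Approximation of generate_image_path: hash "epoch:ID" and build a path
# under the repository. The default repo path is an example value.
def generate_image_path(id, repo = '/var/lib/one/images')
  canonical = "#{Time.now.to_i}:#{id}"
  "#{repo}/#{Digest::MD5.hexdigest(canonical)}"
end

puts generate_image_path(7)
# e.g. /var/lib/one/images/3e4a1c9d0b...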
@ -30,15 +30,20 @@ else
|
||||
fi
|
||||
|
||||
. $LIB_LOCATION/sh/scripts_common.sh
|
||||
source $(dirname $0)/fsrc
|
||||
|
||||
# ------------ Create the image in the repository ------------
|
||||
|
||||
DST=$1
|
||||
FSTYPE=$2
|
||||
SIZE=$3
|
||||
FSTYPE=$1
|
||||
SIZE=$2
|
||||
ID=$3
|
||||
|
||||
DST=`generate_image_path`
|
||||
|
||||
exec_and_log "$DD if=/dev/zero of=$DST bs=1 count=1 seek=${SIZE}M" \
|
||||
"Could not create image $DST"
|
||||
exec_and_log "$MKFS -t $FSTYPE -F $DST" \
|
||||
"Unable to create filesystem $FSTYPE in $DST"
|
||||
exec_and_log "chmod 0660 $DST"
|
||||
|
||||
echo "$DST"
|
||||
|
@ -30,11 +30,20 @@ else
|
||||
fi
|
||||
|
||||
. $LIB_LOCATION/sh/scripts_common.sh
|
||||
|
||||
# ------------ Move the image to the repository ------------
|
||||
source $(dirname $0)/fsrc
|
||||
|
||||
SRC=$1
|
||||
DST=$2
|
||||
ID=$3
|
||||
|
||||
# ------------ Generate a filename for the image ------------
|
||||
|
||||
if [ "$DST" = "-" ] ; then
|
||||
DST=`generate_image_path`
|
||||
fi
|
||||
|
||||
# ------------ Move the image to the repository ------------
|
||||
|
||||
|
||||
case $SRC in
|
||||
http://*)
|
||||
@ -45,9 +54,14 @@ http://*)
|
||||
|
||||
*)
|
||||
log "Moving local image $SRC to the image repository"
|
||||
exec_and_log "mv -f $SRC $DST" \
|
||||
"Could not move $SRC to $DST"
|
||||
if [ \( -L $SRC \) -a \( "`$READLINK $SRC`" = "$DST" \) ] ; then
|
||||
log "Not moving files to image repo, they are the same"
|
||||
else
|
||||
exec_and_log "mv -f $SRC $DST" "Could not move $SRC to $DST"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
exec_and_log "chmod 0660 $DST"
|
||||
|
||||
echo "$DST"
|
||||
|
@ -29,6 +29,7 @@ else
|
||||
fi
|
||||
|
||||
. $LIB_LOCATION/sh/scripts_common.sh
|
||||
source $(dirname $0)/fsrc
|
||||
|
||||
# ------------ Remove the image from the repository ------------
|
||||
|
||||
|
70
src/mad/ruby/scripts_common.rb
Normal file
@ -0,0 +1,70 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
module OpenNebula
|
||||
|
||||
# Generic log function
|
||||
def self.log_function(severity, message)
|
||||
STDERR.puts "#{severity}: #{File.basename $0}: #{message}"
|
||||
end
|
||||
|
||||
# Logs an info message
|
||||
def self.log_info(message)
|
||||
log_function("INFO", message)
|
||||
end
|
||||
|
||||
# Logs an error message
|
||||
def self.log_error(message)
|
||||
log_function("ERROR", message)
|
||||
end
|
||||
|
||||
# Logs a debug message
|
||||
def self.log_debug(message)
|
||||
log_function("DEBUG", message)
|
||||
end
|
||||
|
||||
# Alias log to log_info in the singleton class
|
||||
class << self
|
||||
alias :log :log_info
|
||||
end
|
||||
|
||||
# This function is used to pass error message to the mad
|
||||
def self.error_message(message)
|
||||
STDERR.puts "ERROR MESSAGE --8<------"
|
||||
STDERR.puts message
|
||||
STDERR.puts "ERROR MESSAGE ------>8--"
|
||||
end
|
||||
|
||||
# Executes a command, if it fails returns error message and exits
|
||||
# If a second parameter is present it is used as the error message when
|
||||
# the command fails
|
||||
def self.exec_and_log(command, message=nil)
|
||||
output=`#{command} 2>&1 1>/dev/null`
|
||||
code=$?
|
||||
if code!=0
|
||||
log_error "Command \"#{command}\" failed."
|
||||
log_error output
|
||||
if !message
|
||||
error_message output
|
||||
else
|
||||
error_message message
|
||||
end
|
||||
exit code
|
||||
end
|
||||
log "Executed \"#{command}\"."
|
||||
end
|
||||
|
||||
end
|
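The new scripts_common.rb gives Ruby driver actions the same logging and exec-or-die helpers that the shell actions get from scripts_common.sh. A short usage sketch, assuming the file is reachable through the load path set up when the driver launches the action; the command and paths are placeholders.

# Assumes scripts_common.rb is on the driver's Ruby load path;
# the copied paths are placeholders.
require 'scripts_common'

OpenNebula.log_info("Copying image into the repository")

# Logs the failure, reports it back through the error channel and exits
# with the command's return code if the copy does not succeed.
OpenNebula.exec_and_log("cp -f /tmp/src.img /var/lib/one/images/disk.0",
                        "Could not copy /tmp/src.img")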
@ -32,6 +32,7 @@ SED=/bin/sed
|
||||
SSH=/usr/bin/ssh
|
||||
SUDO=/usr/bin/sudo
|
||||
WGET=/usr/bin/wget
|
||||
READLINK=/bin/readlink
|
||||
|
||||
# Used for log messages
|
||||
SCRIPT_NAME=`basename $0`
|
||||
@ -94,7 +95,7 @@ function exec_and_log
|
||||
if [ "x$code" != "x0" ]; then
|
||||
log_error "Command \"$1\" failed."
|
||||
log_error "$output"
|
||||
if [ -n "$message" ]; then
|
||||
if [ -z "$message" ]; then
|
||||
error_message "$output"
|
||||
else
|
||||
error_message "$message"
|
||||
|
@ -208,7 +208,7 @@ void Nebula::start()
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
db = new MySqlDB(server,port,user,passwd,0);
|
||||
db = new MySqlDB(server,port,user,passwd,db_name);
|
||||
|
||||
oss << "CREATE DATABASE IF NOT EXISTS " << db_name;
|
||||
rc = db->exec(oss);
|
||||
@ -227,15 +227,27 @@ void Nebula::start()
|
||||
}
|
||||
}
|
||||
|
||||
NebulaLog::log("ONE",Log::INFO,"Bootstraping OpenNebula database.");
|
||||
NebulaLog::log("ONE",Log::INFO,"Checking database version.");
|
||||
rc = check_db_version();
|
||||
|
||||
VirtualMachinePool::bootstrap(db);
|
||||
HostPool::bootstrap(db);
|
||||
VirtualNetworkPool::bootstrap(db);
|
||||
UserPool::bootstrap(db);
|
||||
ImagePool::bootstrap(db);
|
||||
ClusterPool::bootstrap(db);
|
||||
VMTemplatePool::bootstrap(db);
|
||||
if( rc == -1 )
|
||||
{
|
||||
throw runtime_error("Database version mismatch.");
|
||||
}
|
||||
|
||||
if( rc == -2 )
|
||||
{
|
||||
NebulaLog::log("ONE",Log::INFO,"Bootstraping OpenNebula database.");
|
||||
|
||||
bootstrap();
|
||||
VirtualMachinePool::bootstrap(db);
|
||||
HostPool::bootstrap(db);
|
||||
VirtualNetworkPool::bootstrap(db);
|
||||
UserPool::bootstrap(db);
|
||||
ImagePool::bootstrap(db);
|
||||
ClusterPool::bootstrap(db);
|
||||
VMTemplatePool::bootstrap(db);
|
||||
}
|
||||
}
|
||||
catch (exception&)
|
||||
{
|
||||
@ -246,7 +258,6 @@ void Nebula::start()
|
||||
{
|
||||
string mac_prefix;
|
||||
int size;
|
||||
string repository_path;
|
||||
string default_image_type;
|
||||
string default_device_prefix;
|
||||
|
||||
@ -266,13 +277,11 @@ void Nebula::start()
|
||||
|
||||
upool = new UserPool(db);
|
||||
|
||||
nebula_configuration->get("IMAGE_REPOSITORY_PATH", repository_path);
|
||||
nebula_configuration->get("DEFAULT_IMAGE_TYPE", default_image_type);
|
||||
nebula_configuration->get("DEFAULT_DEVICE_PREFIX",
|
||||
default_device_prefix);
|
||||
|
||||
ipool = new ImagePool(db,
|
||||
repository_path,
|
||||
default_image_type,
|
||||
default_device_prefix);
|
||||
|
||||
@ -597,3 +606,99 @@ void Nebula::start()
|
||||
|
||||
NebulaLog::log("ONE", Log::INFO, "All modules finalized, exiting.\n");
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void Nebula::bootstrap()
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
oss << "CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, "
|
||||
"last_oid BIGINT UNSIGNED)";
|
||||
db->exec(oss);
|
||||
|
||||
oss.str("");
|
||||
oss << "CREATE TABLE db_versioning (oid INTEGER PRIMARY KEY, "
|
||||
"version INTEGER, timestamp INTEGER, comment VARCHAR(256))";
|
||||
|
||||
db->exec(oss);
|
||||
|
||||
oss.str("");
|
||||
oss << "INSERT INTO db_versioning (oid, version, timestamp, comment) "
|
||||
<< "VALUES (0, " << db_version() << ", " << time(0)
|
||||
<< ", '" << version() << " daemon bootstrap')";
|
||||
|
||||
db->exec(oss);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int Nebula::check_db_version()
|
||||
{
|
||||
int rc;
|
||||
ostringstream oss;
|
||||
|
||||
|
||||
int loaded_db_version = 0;
|
||||
|
||||
// Try to read latest version
|
||||
set_callback( static_cast<Callbackable::Callback>(&Nebula::select_cb),
|
||||
static_cast<void *>(&loaded_db_version) );
|
||||
|
||||
oss << "SELECT version FROM db_versioning "
|
||||
<< "WHERE oid=(SELECT MAX(oid) FROM db_versioning)";
|
||||
|
||||
db->exec(oss, this);
|
||||
|
||||
oss.str("");
|
||||
unset_callback();
|
||||
|
||||
if( loaded_db_version == 0 )
|
||||
{
|
||||
// Table user_pool is present for all OpenNebula versions, and it
|
||||
// always contains at least the oneadmin user.
|
||||
oss << "SELECT MAX(oid) FROM user_pool";
|
||||
rc = db->exec(oss);
|
||||
|
||||
oss.str("");
|
||||
|
||||
if( rc != 0 ) // Database needs bootstrap
|
||||
{
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
|
||||
if( db_version() != loaded_db_version )
|
||||
{
|
||||
oss << "Database version mismatch. "
|
||||
<< "Installed " << version() << " uses DB version '" << db_version()
|
||||
<< "', and existing DB version is '"
|
||||
<< loaded_db_version << "'.";
|
||||
|
||||
NebulaLog::log("ONE",Log::ERROR,oss);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int Nebula::select_cb(void *_loaded_db_version, int num, char **values,
|
||||
char **names)
|
||||
{
|
||||
istringstream iss;
|
||||
int * loaded_db_version;
|
||||
|
||||
loaded_db_version = static_cast<int *>(_loaded_db_version);
|
||||
|
||||
*loaded_db_version = 0;
|
||||
|
||||
if ( (values[0]) && (num == 1) )
|
||||
{
|
||||
iss.str(values[0]);
|
||||
iss >> *loaded_db_version;
|
||||
}
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
@ -126,17 +126,10 @@ NebulaTemplate::NebulaTemplate(string& etc_location, string& var_location)
|
||||
#*******************************************************************************
|
||||
# Image Repository Configuration
|
||||
#*******************************************************************************
|
||||
# IMAGE_REPOSITORY_PATH
|
||||
# DEFAULT_IMAGE_TYPE
|
||||
# DEFAULT_DEVICE_PREFIX
|
||||
#*******************************************************************************
|
||||
*/
|
||||
//IMAGE_REPOSITORY_PATH
|
||||
value = var_location + "/images";
|
||||
|
||||
attribute = new SingleAttribute("IMAGE_REPOSITORY_PATH",value);
|
||||
conf_default.insert(make_pair(attribute->name(),attribute));
|
||||
|
||||
//DEFAULT_IMAGE_TYPE
|
||||
value = "OS";
|
||||
|
||||
|
@ -102,21 +102,27 @@ module OpenNebula
|
||||
end
|
||||
end
|
||||
|
||||
# Gets an array of text from elements extracted
|
||||
# using the XPATH expression passed as filter
|
||||
def retrieve_elements(filter)
|
||||
ids_array = Array.new
|
||||
if NOKOGIRI
|
||||
elements=@xml.xpath(filter.to_s)
|
||||
elements_array = Array.new
|
||||
|
||||
if elements.size == 0
|
||||
return nil
|
||||
end
|
||||
|
||||
elements.each{ |e| ids_array << e.text }
|
||||
if NOKOGIRI
|
||||
@xml.xpath(filter.to_s).each { |pelem|
|
||||
elements_array << pelem.text if pelem.text
|
||||
}
|
||||
else
|
||||
@xml.each(filter.to_s) { |e| ids_array << e.text }
|
||||
@xml.elements.each(filter.to_s) { |pelem|
|
||||
elements_array << pelem.text if pelem.text
|
||||
}
|
||||
end
|
||||
|
||||
if elements_array.size == 0
|
||||
return nil
|
||||
else
|
||||
return elements_array
|
||||
end
|
||||
|
||||
return ids_array
|
||||
end
|
||||
|
||||
# Gets an attribute from an element
|
||||
|
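With this change retrieve_elements collects the text of every element matched by the XPath filter and returns nil when nothing matches, in both the Nokogiri and REXML branches. A stand-alone REXML illustration of that behaviour (it does not use the OCA XMLElement wrapper); the XML snippet and element names are made up.

# Plain-REXML illustration of what retrieve_elements("VM/HISTORY/HOSTNAME")
# would now return; example document only.
require 'rexml/document'

doc = REXML::Document.new(
  "<VM><HISTORY><HOSTNAME>node01</HOSTNAME></HISTORY>" \
  "<HISTORY><HOSTNAME>node02</HOSTNAME></HISTORY></VM>")

elements_array = []
doc.elements.each("VM/HISTORY/HOSTNAME") do |pelem|
  elements_array << pelem.text if pelem.text
end
elements_array = nil if elements_array.empty?

p elements_array    # => ["node01", "node02"]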
281
src/onedb/1.rb
Normal file
@ -0,0 +1,281 @@
|
||||
# -------------------------------------------------------------------------- */
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
# not use this file except in compliance with the License. You may obtain */
|
||||
# a copy of the License at */
|
||||
# */
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
# */
|
||||
# Unless required by applicable law or agreed to in writing, software */
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
# See the License for the specific language governing permissions and */
|
||||
# limitations under the License. */
|
||||
# -------------------------------------------------------------------------- */
|
||||
|
||||
class Migrator < MigratorBase
|
||||
|
||||
def initialize(db, verbose)
|
||||
super(db, verbose)
|
||||
@db_version = 1
|
||||
@one_version = "OpenNebula 2.3.0"
|
||||
end
|
||||
|
||||
def up
|
||||
|
||||
########################################################################
|
||||
# Users
|
||||
########################################################################
|
||||
|
||||
# 2.2 Schema
|
||||
# CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, user_name VARCHAR(256), password TEXT,enabled INTEGER, UNIQUE(user_name));
|
||||
|
||||
# Move table user_pool
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
|
||||
# Create new user_pool
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, UNIQUE(name));"
|
||||
|
||||
# Read each entry in the old user_pool, and insert into new user_pool
|
||||
@db.fetch("SELECT * FROM old_user_pool") do |row|
|
||||
oid = row[:oid]
|
||||
name = row[:user_name]
|
||||
|
||||
body = "<USER><ID>#{oid}</ID><NAME>#{name}</NAME><PASSWORD>#{row[:password]}</PASSWORD><ENABLED>#{row[:enabled]}</ENABLED></USER>"
|
||||
|
||||
@db.run "INSERT INTO user_pool VALUES(#{oid},'#{name}','#{body}');"
|
||||
end
|
||||
|
||||
# Delete old user_pool
|
||||
@db.run "DROP TABLE old_user_pool"
|
||||
|
||||
########################################################################
|
||||
# Clusters
|
||||
########################################################################
|
||||
|
||||
# 2.2 Schema
|
||||
# CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, cluster_name VARCHAR(128), UNIQUE(cluster_name) );
|
||||
|
||||
# Move table
|
||||
@db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;"
|
||||
|
||||
# Create new table
|
||||
@db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, UNIQUE(name));"
|
||||
|
||||
# Read each entry in the old table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_cluster_pool") do |row|
|
||||
oid = row[:oid]
|
||||
name = row[:cluster_name]
|
||||
|
||||
body = "<CLUSTER><ID>#{oid}</ID><NAME>#{name}</NAME></CLUSTER>"
|
||||
|
||||
@db.run "INSERT INTO cluster_pool VALUES(#{oid},'#{name}','#{body}');"
|
||||
end
|
||||
|
||||
# Delete old table
|
||||
@db.run "DROP TABLE old_cluster_pool"
|
||||
|
||||
########################################################################
|
||||
# Hosts
|
||||
########################################################################
|
||||
|
||||
# 2.2 Schema
|
||||
# CREATE TABLE host_pool (oid INTEGER PRIMARY KEY,host_name VARCHAR(256), state INTEGER,im_mad VARCHAR(128),vm_mad VARCHAR(128),tm_mad VARCHAR(128),last_mon_time INTEGER, cluster VARCHAR(128), template TEXT, UNIQUE(host_name));
|
||||
# CREATE TABLE host_shares(hid INTEGER PRIMARY KEY,disk_usage INTEGER, mem_usage INTEGER, cpu_usage INTEGER,max_disk INTEGER, max_mem INTEGER, max_cpu INTEGER,free_disk INTEGER, free_mem INTEGER, free_cpu INTEGER,used_disk INTEGER, used_mem INTEGER, used_cpu INTEGER,running_vms INTEGER);
|
||||
|
||||
# Move table
|
||||
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
|
||||
|
||||
# Create new table
|
||||
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, state INTEGER, last_mon_time INTEGER, cluster VARCHAR(128), UNIQUE(name));"
|
||||
|
||||
# Read each entry in the old table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_host_pool") do |row|
|
||||
oid = row[:oid]
|
||||
name = row[:host_name]
|
||||
state = row[:state]
|
||||
last_mon_time = row[:last_mon_time]
|
||||
cluster = row[:cluster]
|
||||
|
||||
# There is one host share for each host
|
||||
host_share = ""
|
||||
@db.fetch("SELECT * FROM host_shares WHERE hid=#{oid}") do |share|
|
||||
host_share = "<HOST_SHARE><DISK_USAGE>#{share[:disk_usage]}</DISK_USAGE><MEM_USAGE>#{share[:mem_usage]}</MEM_USAGE><CPU_USAGE>#{share[:cpu_usage]}</CPU_USAGE><MAX_DISK>#{share[:max_disk]}</MAX_DISK><MAX_MEM>#{share[:max_mem]}</MAX_MEM><MAX_CPU>#{share[:max_cpu]}</MAX_CPU><FREE_DISK>#{share[:free_disk]}</FREE_DISK><FREE_MEM>#{share[:free_mem]}</FREE_MEM><FREE_CPU>#{share[:free_cpu]}</FREE_CPU><USED_DISK>#{share[:used_disk]}</USED_DISK><USED_MEM>#{share[:used_mem]}</USED_MEM><USED_CPU>#{share[:used_cpu]}</USED_CPU><RUNNING_VMS>#{share[:running_vms]}</RUNNING_VMS></HOST_SHARE>"
|
||||
end
|
||||
|
||||
body = "<HOST><ID>#{oid}</ID><NAME>#{name}</NAME><STATE>#{state}</STATE><IM_MAD>#{row[:im_mad]}</IM_MAD><VM_MAD>#{row[:vm_mad]}</VM_MAD><TM_MAD>#{row[:tm_mad]}</TM_MAD><LAST_MON_TIME>#{last_mon_time}</LAST_MON_TIME><CLUSTER>#{cluster}</CLUSTER>#{host_share}#{row[:template]}</HOST>"
|
||||
|
||||
@db.run "INSERT INTO host_pool VALUES(#{oid},'#{name}','#{body}', #{state}, #{last_mon_time}, '#{cluster}');"
|
||||
end
|
||||
|
||||
# Delete old table
|
||||
@db.run "DROP TABLE old_host_pool"
|
||||
@db.run "DROP TABLE host_shares"
|
||||
|
||||
########################################################################
|
||||
# Images
|
||||
########################################################################
|
||||
|
||||
# 2.2 Schema
|
||||
# CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, uid INTEGER, name VARCHAR(128), type INTEGER, public INTEGER, persistent INTEGER, regtime INTEGER, source TEXT, state INTEGER, running_vms INTEGER, template TEXT, UNIQUE(name) );
|
||||
|
||||
# Move table
|
||||
@db.run "ALTER TABLE image_pool RENAME TO old_image_pool;"
|
||||
|
||||
# Create new table
|
||||
@db.run "CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, uid INTEGER, public INTEGER, UNIQUE(name,uid) );"
|
||||
|
||||
# Read each entry in the old table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_image_pool") do |row|
|
||||
oid = row[:oid]
|
||||
name = row[:name]
|
||||
uid = row[:uid]
|
||||
public = row[:public]
|
||||
|
||||
username = get_username(uid)
|
||||
|
||||
# In OpenNebula 2.0 Image States go from 0 to 3, in 3.0 go
|
||||
# from 0 to 5, but the meaning is the same for states 0 to 3
|
||||
body = "<IMAGE><ID>#{oid}</ID><UID>#{row[:uid]}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><TYPE>#{row[:type]}</TYPE><PUBLIC>#{public}</PUBLIC><PERSISTENT>#{row[:persistent]}</PERSISTENT><REGTIME>#{row[:regtime]}</REGTIME><SOURCE>#{row[:source]}</SOURCE><STATE>#{row[:state]}</STATE><RUNNING_VMS>#{row[:running_vms]}</RUNNING_VMS>#{row[:template]}</IMAGE>"
|
||||
|
||||
@db.run "INSERT INTO image_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{public});"
|
||||
end
|
||||
|
||||
# Delete old table
|
||||
@db.run "DROP TABLE old_image_pool"
|
||||
|
||||
########################################################################
|
||||
# VMs
|
||||
########################################################################
|
||||
|
||||
# 2.2 Schema
|
||||
# CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY,uid INTEGER,name TEXT,last_poll INTEGER, state INTEGER,lcm_state INTEGER,stime INTEGER,etime INTEGER,deploy_id TEXT,memory INTEGER,cpu INTEGER,net_tx INTEGER,net_rx INTEGER, last_seq INTEGER, template TEXT);
|
||||
# CREATE TABLE history (vid INTEGER,seq INTEGER,host_name TEXT,vm_dir TEXT,hid INTEGER,vm_mad TEXT,tm_mad TEXT,stime INTEGER,etime INTEGER,pstime INTEGER,petime INTEGER,rstime INTEGER,retime INTEGER,estime INTEGER,eetime INTEGER,reason INTEGER,PRIMARY KEY(vid,seq));
|
||||
|
||||
# Move tables
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "ALTER TABLE history RENAME TO old_history;"
|
||||
|
||||
# Create new tables
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name TEXT, body TEXT, uid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER);"
|
||||
@db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body TEXT, PRIMARY KEY(vid,seq));"
|
||||
|
||||
|
||||
# Read each entry in the old history table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_history") do |row|
|
||||
vid = row[:vid]
|
||||
seq = row[:seq]
|
||||
|
||||
body = "<HISTORY><SEQ>#{seq}</SEQ><HOSTNAME>#{row[:host_name]}</HOSTNAME><VM_DIR>#{row[:vm_dir]}</VM_DIR><HID>#{row[:hid]}</HID><STIME>#{row[:stime]}</STIME><ETIME>#{row[:etime]}</ETIME><VMMMAD>#{row[:vm_mad]}</VMMMAD><TMMAD>#{row[:tm_mad]}</TMMAD><PSTIME>#{row[:pstime]}</PSTIME><PETIME>#{row[:petime]}</PETIME><RSTIME>#{row[:rstime]}</RSTIME><RETIME>#{row[:retime]}</RETIME><ESTIME>#{row[:estime]}</ESTIME><EETIME>#{row[:eetime]}</EETIME><REASON>#{row[:reason]}</REASON></HISTORY>"
|
||||
|
||||
@db.run "INSERT INTO history VALUES(#{vid},'#{seq}','#{body}');"
|
||||
end
|
||||
|
||||
|
||||
# Read each entry in the old vm table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
oid = row[:oid]
|
||||
name = row[:name]
|
||||
uid = row[:uid]
|
||||
last_poll = row[:last_poll]
|
||||
state = row[:state]
|
||||
lcm_state = row[:lcm_state]
|
||||
|
||||
username = get_username(uid)
|
||||
|
||||
# If the VM has History items, the last one is included in the XML
|
||||
history = ""
|
||||
@db.fetch("SELECT body FROM history WHERE vid=#{oid} AND seq=(SELECT MAX(seq) FROM history WHERE vid=#{oid})") do |history_row|
|
||||
history = history_row[:body]
|
||||
end
|
||||
|
||||
body = "<VM><ID>#{oid}</ID><UID>#{uid}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><LAST_POLL>#{last_poll}</LAST_POLL><STATE>#{state}</STATE><LCM_STATE>#{lcm_state}</LCM_STATE><STIME>#{row[:stime]}</STIME><ETIME>#{row[:etime]}</ETIME><DEPLOY_ID>#{row[:deploy_id]}</DEPLOY_ID><MEMORY>#{row[:memory]}</MEMORY><CPU>#{row[:cpu]}</CPU><NET_TX>#{row[:net_tx]}</NET_TX><NET_RX>#{row[:net_rx]}</NET_RX>#{row[:template]}#{history}</VM>"
|
||||
|
||||
@db.run "INSERT INTO vm_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{last_poll}, #{state}, #{lcm_state});"
|
||||
end
|
||||
|
||||
|
||||
# Delete old tables
|
||||
@db.run "DROP TABLE old_vm_pool"
|
||||
@db.run "DROP TABLE old_history"
|
||||
|
||||
|
||||
########################################################################
|
||||
# Virtual Networks
|
||||
########################################################################
|
||||
|
||||
# 2.2 Schema
|
||||
# CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, uid INTEGER, name VARCHAR(256), type INTEGER, bridge TEXT, public INTEGER, template TEXT, UNIQUE(name));
|
||||
# CREATE TABLE leases (oid INTEGER, ip BIGINT, mac_prefix BIGINT, mac_suffix BIGINT,vid INTEGER, used INTEGER, PRIMARY KEY(oid,ip));
|
||||
|
||||
# Move tables
|
||||
@db.run "ALTER TABLE network_pool RENAME TO old_network_pool;"
|
||||
@db.run "ALTER TABLE leases RENAME TO old_leases;"
|
||||
|
||||
# Create new tables
|
||||
@db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, public INTEGER, UNIQUE(name,uid));"
|
||||
@db.run "CREATE TABLE leases (oid INTEGER, ip BIGINT, body TEXT, PRIMARY KEY(oid,ip));"
|
||||
|
||||
# Read each entry in the old table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_network_pool") do |row|
|
||||
oid = row[:oid]
|
||||
name = row[:name]
|
||||
uid = row[:uid]
|
||||
public = row[:public]
|
||||
|
||||
username = get_username(uid)
|
||||
|
||||
# <TOTAL_LEASES> is stored in the DB, but it is not used to rebuild
|
||||
# the VirtualNetwork object, and it is generated each time the
|
||||
# network is listed. So setting it to 0 is safe
|
||||
body = "<VNET><ID>#{oid}</ID><UID>#{uid}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><TYPE>#{row[:type]}</TYPE><BRIDGE>#{row[:bridge]}</BRIDGE><PUBLIC>#{public}</PUBLIC><TOTAL_LEASES>0</TOTAL_LEASES>#{row[:template]}</VNET>"
|
||||
|
||||
@db.run "INSERT INTO network_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{public});"
|
||||
end
|
||||
|
||||
# Read each entry in the old table, and insert into new table
|
||||
@db.fetch("SELECT * FROM old_leases") do |row|
|
||||
oid = row[:oid]
|
||||
ip = row[:ip]
|
||||
|
||||
body = "<LEASE><IP>#{ip}</IP><MAC_PREFIX>#{row[:mac_prefix]}</MAC_PREFIX><MAC_SUFFIX>#{row[:mac_suffix]}</MAC_SUFFIX><USED>#{row[:used]}</USED><VID>#{row[:vid]}</VID></LEASE>"
|
||||
|
||||
@db.run "INSERT INTO leases VALUES(#{oid}, #{ip}, '#{body}');"
|
||||
end
|
||||
|
||||
# Delete old tables
|
||||
@db.run "DROP TABLE old_network_pool"
|
||||
@db.run "DROP TABLE old_leases"
|
||||
|
||||
|
||||
########################################################################
|
||||
# New tables in DB version 1
|
||||
########################################################################
|
||||
|
||||
@db.run "CREATE TABLE db_versioning (oid INTEGER PRIMARY KEY, version INTEGER, timestamp INTEGER, comment VARCHAR(256));"
|
||||
@db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, uid INTEGER, public INTEGER);"
|
||||
|
||||
# New pool_control table contains the last_oid used, must be rebuilt
|
||||
@db.run "CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, last_oid BIGINT UNSIGNED)"
|
||||
|
||||
for table in ["user_pool", "cluster_pool", "host_pool", "image_pool", "vm_pool", "network_pool"] do
|
||||
@db.fetch("SELECT MAX(oid) FROM #{table}") do |row|
|
||||
if( row[:"MAX(oid)"] != nil )
|
||||
@db.run "INSERT INTO pool_control (tablename, last_oid) VALUES ('#{table}', #{row[:"MAX(oid)"]});"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
def get_username(uid)
|
||||
username = ""
|
||||
|
||||
@db.fetch("SELECT name FROM user_pool WHERE oid=#{uid}") do |user|
|
||||
username = user[:name]
|
||||
end
|
||||
|
||||
return username
|
||||
end
|
||||
end
|
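Besides rewriting every pool into the name/body layout, the migrator has to seed the new pool_control table with the highest OID of each pool so that allocation continues where 2.2 left off. A stand-alone sketch of that rebuild step using Sequel with an in-memory SQLite database (needs the sequel and sqlite3 gems); the table contents are invented for the example.

require 'sequel'

db = Sequel.sqlite    # throw-away in-memory database

# A miniature 3.0-style pool plus the new control table.
db.run("CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name TEXT, body TEXT)")
db.run("INSERT INTO vm_pool VALUES (0, 'vm0', '<VM/>')")
db.run("INSERT INTO vm_pool VALUES (7, 'vm7', '<VM/>')")
db.run("CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, " \
       "last_oid BIGINT UNSIGNED)")

# Same rebuild logic as the migrator: record MAX(oid) for the table.
db.fetch("SELECT MAX(oid) FROM vm_pool") do |row|
  if row[:"MAX(oid)"]
    db.run("INSERT INTO pool_control (tablename, last_oid) " \
           "VALUES ('vm_pool', #{row[:"MAX(oid)"]})")
  end
end

p db[:pool_control].all   # => [{:tablename=>"vm_pool", :last_oid=>7}]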
@ -15,6 +15,25 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include "PoolObjectSQL.h"
|
||||
#include "SSLTools.h"
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string& PoolObjectSQL::to_xml64(string &xml64)
|
||||
{
|
||||
string *str64;
|
||||
|
||||
to_xml(xml64);
|
||||
|
||||
str64 = SSLTools::base64_encode(xml64);
|
||||
|
||||
xml64 = *str64;
|
||||
|
||||
delete str64;
|
||||
|
||||
return xml64;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -50,7 +50,8 @@ int PoolSQL::init_cb(void *nil, int num, char **values, char **names)
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
PoolSQL::PoolSQL(SqlDB * _db, const char * table): db(_db), lastOID(-1)
|
||||
PoolSQL::PoolSQL(SqlDB * _db, const char * _table):
|
||||
db(_db), lastOID(-1), table(_table)
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
@ -58,7 +59,7 @@ PoolSQL::PoolSQL(SqlDB * _db, const char * table): db(_db), lastOID(-1)
|
||||
|
||||
set_callback(static_cast<Callbackable::Callback>(&PoolSQL::init_cb));
|
||||
|
||||
oss << "SELECT MAX(oid) FROM " << table;
|
||||
oss << "SELECT last_oid FROM pool_control WHERE tablename='" << table <<"'";
|
||||
|
||||
db->exec(oss,this);
|
||||
|
||||
@ -129,11 +130,29 @@ int PoolSQL::allocate(
|
||||
|
||||
delete objsql;
|
||||
|
||||
if( rc != -1 )
|
||||
{
|
||||
update_lastOID();
|
||||
}
|
||||
|
||||
unlock();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
void PoolSQL::update_lastOID()
|
||||
{
|
||||
// db->escape_str is not used for 'table' since its name can't be set in
|
||||
// any way by the user, it is hardcoded.
|
||||
|
||||
ostringstream oss;
|
||||
oss << "REPLACE INTO pool_control (tablename, last_oid) VALUES ("
|
||||
<< "'" << table << "',"
|
||||
<< lastOID << ")";
|
||||
|
||||
db->exec(oss);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
|
@ -102,7 +102,7 @@ void RequestManager::VirtualMachineSaveDisk::execute(
|
||||
|
||||
oss << "NAME= " << img_name << endl;
|
||||
oss << "PUBLIC = NO " << endl;
|
||||
oss << "SOURCE = " << Image::generate_source(uid,img_name);
|
||||
oss << "SOURCE = - " << endl;
|
||||
|
||||
img_template = new ImageTemplate;
|
||||
|
||||
|
@ -29,7 +29,6 @@ void RequestManager::TemplateInfo::execute(
|
||||
string session;
|
||||
|
||||
int oid;
|
||||
int uid; // owner user id
|
||||
int rc; // Requesting user id
|
||||
VMTemplate * vm_template;
|
||||
|
||||
@ -55,8 +54,6 @@ void RequestManager::TemplateInfo::execute(
|
||||
goto error_get;
|
||||
}
|
||||
|
||||
uid = vm_template->get_uid();
|
||||
|
||||
// Check if it is a valid user
|
||||
rc = TemplateInfo::upool->authenticate(session);
|
||||
|
||||
|
@ -82,6 +82,11 @@ public:
|
||||
|
||||
private:
|
||||
|
||||
/**
|
||||
* Default message size for XML data off the network
|
||||
*/
|
||||
static const int MESSAGE_SIZE;
|
||||
|
||||
string one_auth;
|
||||
string one_endpoint;
|
||||
|
||||
@ -92,13 +97,6 @@ private:
|
||||
int read_oneauth(string &secret);
|
||||
|
||||
int split_secret(const string secret, string& user, string& pass);
|
||||
|
||||
string sha1_digest(const string& pass);
|
||||
|
||||
/**
|
||||
* Default message size for XML data off the network
|
||||
*/
|
||||
static const int MESSAGE_SIZE;
|
||||
};
|
||||
|
||||
#endif /*ONECLIENT_H_*/
|
||||
|
@ -15,6 +15,7 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include "Client.h"
|
||||
#include "SSLTools.h"
|
||||
|
||||
#include <fstream>
|
||||
#include <pwd.h>
|
||||
@ -25,12 +26,8 @@
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
|
||||
#include <openssl/evp.h>
|
||||
#include <iomanip>
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
@ -57,7 +54,7 @@ void Client::set_one_auth(string secret)
|
||||
|
||||
if( rc == 0 )
|
||||
{
|
||||
string sha1_pass = sha1_digest(pass);
|
||||
string sha1_pass = SSLTools::sha1_digest(pass);
|
||||
|
||||
one_auth = user + ":" + sha1_pass;
|
||||
}
|
||||
@ -157,33 +154,6 @@ int Client::split_secret(const string secret, string& user, string& pass)
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string Client::sha1_digest(const string& pass)
|
||||
{
|
||||
EVP_MD_CTX mdctx;
|
||||
unsigned char md_value[EVP_MAX_MD_SIZE];
|
||||
unsigned int md_len;
|
||||
ostringstream oss;
|
||||
|
||||
EVP_MD_CTX_init(&mdctx);
|
||||
EVP_DigestInit_ex(&mdctx, EVP_sha1(), NULL);
|
||||
|
||||
EVP_DigestUpdate(&mdctx, pass.c_str(), pass.length());
|
||||
|
||||
EVP_DigestFinal_ex(&mdctx,md_value,&md_len);
|
||||
EVP_MD_CTX_cleanup(&mdctx);
|
||||
|
||||
for(unsigned int i = 0; i<md_len; i++)
|
||||
{
|
||||
oss << setfill('0') << setw(2) << hex << nouppercase
|
||||
<< (unsigned short) md_value[i];
|
||||
}
|
||||
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void Client::set_one_endpoint(string endpoint)
|
||||
{
|
||||
one_endpoint = "http://localhost:2633/RPC2";
|
||||
|
@ -15,6 +15,7 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include "MySqlDB.h"
|
||||
#include <mysql/errmsg.h>
|
||||
|
||||
/*********
|
||||
* Doc: http://dev.mysql.com/doc/refman/5.5/en/c-api-function-overview.html
|
||||
@ -23,12 +24,17 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
MySqlDB::MySqlDB(
|
||||
const string& server,
|
||||
int port,
|
||||
const string& user,
|
||||
const string& password,
|
||||
const char * database)
|
||||
const string& _server,
|
||||
int _port,
|
||||
const string& _user,
|
||||
const string& _password,
|
||||
const string& _database)
|
||||
{
|
||||
server = _server;
|
||||
port = _port;
|
||||
user = _user;
|
||||
password = _password;
|
||||
database = _database;
|
||||
|
||||
// Initialize the MySQL library
|
||||
mysql_library_init(0, NULL, NULL);
|
||||
@ -38,7 +44,7 @@ MySqlDB::MySqlDB(
|
||||
|
||||
// Connect to the server
|
||||
if (!mysql_real_connect(db, server.c_str(), user.c_str(),
|
||||
password.c_str(), database, port, NULL, 0))
|
||||
password.c_str(), 0, port, NULL, 0))
|
||||
{
|
||||
throw runtime_error("Could not open database.");
|
||||
}
|
||||
@ -81,8 +87,27 @@ int MySqlDB::exec(ostringstream& cmd, Callbackable* obj)
|
||||
const char * err_msg = mysql_error(db);
|
||||
int err_num = mysql_errno(db);
|
||||
|
||||
oss << "SQL command was: " << c_str;
|
||||
oss << ", error " << err_num << " : " << err_msg;
|
||||
if( err_num == CR_SERVER_GONE_ERROR || err_num == CR_SERVER_LOST )
|
||||
{
|
||||
oss << "MySQL connection error " << err_num << " : " << err_msg;
|
||||
|
||||
// Try to re-connect
|
||||
if (mysql_real_connect(db, server.c_str(), user.c_str(),
|
||||
password.c_str(), database.c_str(),
|
||||
port, NULL, 0))
|
||||
{
|
||||
oss << "... Reconnected.";
|
||||
}
|
||||
else
|
||||
{
|
||||
oss << "... Reconnection attempt failed.";
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
oss << "SQL command was: " << c_str;
|
||||
oss << ", error " << err_num << " : " << err_msg;
|
||||
}
|
||||
|
||||
NebulaLog::log("ONE",Log::ERROR,oss);
|
||||
|
||||
|
@ -22,24 +22,15 @@ if [ -z "$ONE_LOCATION" ]; then
|
||||
SUNSTONE_SERVER=/usr/lib/one/sunstone/config.ru
|
||||
SUNSTONE_LOCK_FILE=/var/lock/one/.sunstone.lock
|
||||
SUNSTONE_LOG=/var/log/one/sunstone.log
|
||||
SUNSTONE_CONF=/etc/one/sunstone-server.conf
|
||||
else
|
||||
SUNSTONE_PID=$ONE_LOCATION/var/sunstone.pid
|
||||
SUNSTONE_SERVER=$ONE_LOCATION/lib/sunstone/config.ru
|
||||
SUNSTONE_LOCK_FILE=$ONE_LOCATION/var/.sunstone.lock
|
||||
SUNSTONE_LOG=$ONE_LOCATION/var/sunstone.log
|
||||
SUNSTONE_CONF=$ONE_LOCATION/etc/sunstone-server.conf
|
||||
fi
|
||||
|
||||
PORT="4567"
|
||||
HOST="127.0.0.1"
|
||||
|
||||
usage() {
|
||||
echo
|
||||
echo "Usage: sunstone-server [-H host] [-p port]"
|
||||
echo
|
||||
echo "-H: Host for the Sunstone server, default value: localhost"
|
||||
echo "-p: Port for incoming connections, default value: 4567"
|
||||
}
|
||||
|
||||
setup()
|
||||
{
|
||||
|
||||
@ -48,7 +39,8 @@ setup()
|
||||
SUNSTONEPID=`cat $SUNSTONE_PID`
|
||||
ps $SUNSTONEPID &> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Sunstone Server is still running (PID:$SUNSTONEPID). Please try 'sunstone-server stop' first."
|
||||
echo -n "Sunstone Server is still running (PID:$SUNSTONEPID). Please "
|
||||
echo "try 'sunstone-server stop' first."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
@ -65,14 +57,21 @@ start()
|
||||
exit 1
|
||||
fi
|
||||
|
||||
source $SUNSTONE_CONF
|
||||
|
||||
lsof -i:$PORT &> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "The port $PORT is being used. Please specify a different one."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start the sunstone daemon
|
||||
touch $SUNSTONE_LOCK_FILE
|
||||
rackup $SUNSTONE_SERVER -s thin -p $PORT -o $HOST -P $SUNSTONE_PID &> $SUNSTONE_LOG &
|
||||
rackup $SUNSTONE_SERVER -s thin -p $PORT -o $HOST \
|
||||
-P $SUNSTONE_PID &> $SUNSTONE_LOG &
|
||||
|
||||
LASTRC=$?
|
||||
|
||||
if [ $LASTRC -ne 0 ]; then
|
||||
echo "Error executing $SUNSTONE_SERVER"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error executing $SUNSTONE_SERVER, please check the log $SUNSTONE_LOG"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -80,11 +79,11 @@ start()
|
||||
ps $LASTPID &> /dev/null
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error executing $SUNSTONE_SERVER."
|
||||
echo "Error executing $SUNSTONE_SERVER, please check the log $SUNSTONE_LOG"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "sunstone-server started"
|
||||
echo "sunstone-server listening on $HOST:$PORT"
|
||||
}
|
||||
|
||||
#
|
||||
@ -106,19 +105,8 @@ stop()
|
||||
echo "sunstone-server stopped"
|
||||
}
|
||||
|
||||
while getopts "p:H:" OPTION
|
||||
do
|
||||
case $OPTION in
|
||||
p) PORT=$OPTARG;;
|
||||
H) HOST=$OPTARG;;
|
||||
*) usage; exit 3;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $((OPTIND-1))
|
||||
|
||||
case "$1" in
|
||||
start) setup; start;;
|
||||
stop) stop;;
|
||||
*) usage; exit 3;;
|
||||
*) echo "Usage: sunstone-server {start|stop}" >&2; exit 3;;
|
||||
esac
|
||||
|
7
src/sunstone/etc/sunstone-server.conf
Normal file
@ -0,0 +1,7 @@
|
||||
# Server Configuration
|
||||
HOST=127.0.0.1
|
||||
PORT=9869
|
||||
|
||||
# VNC Configuration
|
||||
VNC_PROXY_BASE_PORT=29876
|
||||
NOVNC_PATH=
|
@ -32,7 +32,7 @@ module OpenNebulaJSON
|
||||
template = template_to_str(image_hash)
|
||||
end
|
||||
|
||||
OpenNebula::ImageRepository.new.create(self, template)
|
||||
self.allocate(template)
|
||||
end
|
||||
|
||||
def perform_action(template_json)
|
||||
|
@ -17,6 +17,7 @@
|
||||
require 'OpenNebulaJSON/JSONUtils'
|
||||
|
||||
module OpenNebulaJSON
|
||||
|
||||
class VirtualMachineJSON < OpenNebula::VirtualMachine
|
||||
include JSONUtils
|
||||
|
||||
@ -86,43 +87,7 @@ module OpenNebulaJSON
|
||||
end
|
||||
|
||||
def save_as(params=Hash.new)
|
||||
if params['image_type']
|
||||
image_type = params['image_type']
|
||||
else
|
||||
image_id = self["TEMPLATE/DISK[DISK_ID=\"#{params[:disk_id]}\"]/IMAGE_ID"]
|
||||
|
||||
if (image_id != nil)
|
||||
if self["TEMPLATE/DISK[DISK_ID=\"#{disk_id}\"]/SAVE_AS"]
|
||||
error_msg = "Error: The disk #{disk_id} is already" <<
|
||||
" supposed to be saved"
|
||||
return OpenNebula::Error.new(error_msg)
|
||||
end
|
||||
|
||||
# Get the image type
|
||||
image = OpenNebula::Image.new(
|
||||
OpenNebula::Image.build_xml(image_id), @client)
|
||||
|
||||
result = image.info
|
||||
if OpenNebula.is_error?(result)
|
||||
return result
|
||||
end
|
||||
|
||||
image_type = image.type_str
|
||||
end
|
||||
end
|
||||
|
||||
# Build the template and allocate the new Image
|
||||
template = "NAME=\"#{params['image_name']}\"\n"
|
||||
template << "TYPE=#{image_type}\n" if image_type
|
||||
|
||||
image = OpenNebula::Image.new(OpenNebula::Image.build_xml, @client)
|
||||
|
||||
result = image.allocate(template)
|
||||
if OpenNebula.is_error?(result)
|
||||
return result
|
||||
end
|
||||
|
||||
super(params['disk_id'].to_i, image.id)
|
||||
super(params['disk_id'].to_i, params['image_name'])
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -14,22 +14,7 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
ONE_LOCATION = ENV["ONE_LOCATION"]
|
||||
|
||||
if !ONE_LOCATION
|
||||
LOG_LOCATION = "/var/log/one"
|
||||
VAR_LOCATION = "/var/lib/one"
|
||||
RUBY_LIB_LOCATION = "/usr/lib/one/ruby"
|
||||
else
|
||||
VAR_LOCATION = ONE_LOCATION+"/var"
|
||||
LOG_LOCATION = ONE_LOCATION+"/var"
|
||||
RUBY_LIB_LOCATION = ONE_LOCATION+"/lib/ruby"
|
||||
end
|
||||
|
||||
$: << RUBY_LIB_LOCATION
|
||||
$: << File.dirname(__FILE__)
|
||||
|
||||
require 'models/OpenNebulaJSON'
|
||||
require 'OpenNebulaJSON'
|
||||
include OpenNebulaJSON
|
||||
|
||||
class SunstoneServer
|
||||
@ -215,6 +200,69 @@ class SunstoneServer
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
########################################################################
|
||||
# VNC
|
||||
########################################################################
|
||||
def startvnc(id, config)
|
||||
resource = retrieve_resource("vm", id)
|
||||
if OpenNebula.is_error?(resource)
|
||||
return [404, resource.to_json]
|
||||
end
|
||||
|
||||
if resource['LCM_STATE'] != "3"
|
||||
error = OpenNebula::Error.new("VM is not running")
|
||||
return [403, error.to_json]
|
||||
end
|
||||
|
||||
if resource['TEMPLATE/GRAPHICS/TYPE'] != "vnc"
|
||||
error = OpenNebula::Error.new("VM has no VNC configured")
|
||||
return [403, error.to_json]
|
||||
end
|
||||
|
||||
# The VM host and its VNC port
|
||||
host = resource['HISTORY/HOSTNAME']
|
||||
vnc_port = resource['TEMPLATE/GRAPHICS/PORT']
|
||||
|
||||
# The noVNC proxy_port
|
||||
proxy_port = config[:vnc_proxy_base_port].to_i + vnc_port.to_i
|
||||
|
||||
begin
|
||||
novnc_cmd = "#{config[:novnc_path]}/utils/launch.sh"
|
||||
pipe = IO.popen("#{novnc_cmd} --listen #{proxy_port} \
|
||||
--vnc #{host}:#{vnc_port}")
|
||||
rescue Exception => e
|
||||
error = Error.new(e.message)
|
||||
return [500, error.to_json]
|
||||
end
|
||||
|
||||
vnc_pw = resource['TEMPLATE/GRAPHICS/PASSWD']
|
||||
|
||||
info = {:pipe => pipe, :port => proxy_port, :password => vnc_pw}
|
||||
return [200, info]
|
||||
end
|
||||
|
||||
############################################################################
|
||||
#
|
||||
############################################################################
|
||||
def stopvnc(id,pipe)
|
||||
resource = retrieve_resource("vm", id)
|
||||
if OpenNebula.is_error?(resource)
|
||||
return [404, resource.to_json]
|
||||
end
|
||||
|
||||
begin
|
||||
Process.kill('KILL',pipe.pid)
|
||||
pipe.close
|
||||
rescue Exception => e
|
||||
error = Error.new(e.message)
|
||||
return [500, error.to_json]
|
||||
end
|
||||
|
||||
return [200, nil]
|
||||
end
|
||||
|
||||
|
||||
private
|
||||
|
||||
def retrieve_resource(kind, id)
|
||||
|
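For the VNC support, startvnc picks a proxy port by adding the VM's own VNC port to the configured base port and then launches noVNC's launch.sh against the hypervisor host; stopvnc later kills that process through the stored pipe. A stand-alone sketch of the port arithmetic and the command being assembled; host, port and paths are example values.

# Example values; the real ones come from the VM's HISTORY/HOSTNAME and
# TEMPLATE/GRAPHICS/PORT plus sunstone-server.conf.
config   = { :vnc_proxy_base_port => 29876, :novnc_path => '/usr/share/noVNC' }
host     = 'node01'
vnc_port = '5905'

proxy_port = config[:vnc_proxy_base_port].to_i + vnc_port.to_i
novnc_cmd  = "#{config[:novnc_path]}/utils/launch.sh"

puts "#{novnc_cmd} --listen #{proxy_port} --vnc #{host}:#{vnc_port}"
# => /usr/share/noVNC/utils/launch.sh --listen 35781 --vnc node01:5905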
@ -372,10 +372,13 @@ tr.even:hover{
|
||||
|
||||
|
||||
.info_table td{
|
||||
border-bottom: 1px solid #CCCCCC;
|
||||
color: #353735;
|
||||
padding: 6px 8px;
|
||||
}
|
||||
border-bottom: 1px solid #CCCCCC;
|
||||
color: #353735;
|
||||
padding-top: 6px;
|
||||
padding-bottom: 6px;
|
||||
padding-left: 8px;
|
||||
padding-right: 8px;
|
||||
}
|
||||
|
||||
|
||||
.info_table td.key_td{
|
||||
@ -384,10 +387,6 @@ tr.even:hover{
|
||||
font-weight:bold;
|
||||
}
|
||||
|
||||
.info_table td.key_td:after{
|
||||
content:":";
|
||||
}
|
||||
|
||||
.info_table td.value_td{
|
||||
text-align:left;
|
||||
}
|
||||
|
BIN
src/sunstone/public/images/vnc_off.png
Normal file (binary, 849 B)
BIN
src/sunstone/public/images/vnc_on.png
Normal file (binary, 1.1 KiB)