
Merge branch 'feature-591'

Conflicts:
	install.sh
commit bde842ed88
Ruben S. Montero, 2011-05-12 14:50:22 +02:00
7 changed files with 1082 additions and 12 deletions


@ -37,7 +37,9 @@
#include "AuthManager.h"
#include "ImageManager.h"
+#include "Callbackable.h"
-class Nebula
+class Nebula : public Callbackable
{
public:
@ -222,6 +224,11 @@ public:
return "OpenNebula 2.3.0";
};
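/**
* Returns the version of the database schema used by this OpenNebula version
*/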
static int db_version()
{
return 1;
};
void start();
void get_configuration_attribute(
@ -424,6 +431,28 @@ private:
// ---------------------------------------------------------------
friend void nebula_signal_handler (int sig);
/**
* Bootstraps the database control tables
*/
void bootstrap();
/**
* Callback used by check_db_version() to read the version stored in the DB
* @param _loaded_db_version pointer to the int where the version read from
* the db_versioning table is returned
* @param num the number of columns read from the DB
* @param values the column values
* @param names the column names
* @return 0 on success
*/
int select_cb(void *_loaded_db_version, int num, char **values,
char **names);
/**
* Reads the DB version stored in the db_versioning table and compares it
* with the version required by this oned (db_version())
* @return 0 ok, -1 version mismatch, -2 needs bootstrap
*/
int check_db_version();
};
#endif /*NEBULA_H_*/


@ -47,7 +47,7 @@ public:
* counter). If null the OID counter is not updated.
* @param with_uid the Pool objects have an owner id (uid)
*/
-PoolSQL(SqlDB * _db, const char * table);
+PoolSQL(SqlDB * _db, const char * _table);
virtual ~PoolSQL();
@ -189,6 +189,11 @@ private:
*/
int lastOID;
/**
* Tablename for this pool
*/
string table;
/**
* The pool is implemented with a Map of SQL object pointers, using the
* OID as key.
@ -253,6 +258,11 @@ private:
return key.str();
};
/**
* Inserts the last oid into the pool_control table
*/
void update_lastOID();
/* ---------------------------------------------------------------------- */
/* ---------------------------------------------------------------------- */


@ -181,6 +181,7 @@ LIB_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/cloud/econe \
$LIB_LOCATION/ruby/cloud/econe/views \
$LIB_LOCATION/ruby/cloud/occi \
$LIB_LOCATION/onedb \
$LIB_LOCATION/tm_commands \
$LIB_LOCATION/tm_commands/nfs \
$LIB_LOCATION/tm_commands/ssh \
@ -264,6 +265,7 @@ INSTALL_FILES=(
MAD_SH_LIB_FILES:$LIB_LOCATION/sh
MAD_SH_LIB_FILES:$LIB_LOCATION/remotes
MAD_SH_LIB_FILES:$VAR_LOCATION/remotes
ONEDB_MIGRATOR_FILES:$LIB_LOCATION/onedb
MADS_LIB_FILES:$LIB_LOCATION/mads
IM_PROBES_FILES:$VAR_LOCATION/remotes/im
IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
@ -368,6 +370,7 @@ BIN_FILES="src/nebula/oned \
src/cli/oneimage \
src/cli/onecluster \
src/cli/onetemplate \
src/cli/onedb \
share/scripts/one \
src/authm_mad/oneauth"
@ -550,6 +553,11 @@ IMAGE_DRIVER_FS_SCRIPTS="src/image_mad/remotes/fs/cp \
src/image_mad/remotes/fs/fsrc \
src/image_mad/remotes/fs/rm"
#-------------------------------------------------------------------------------
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
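# Each <N>.rb script upgrades the database schema from version N-1 to N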
#-------------------------------------------------------------------------------
ONEDB_MIGRATOR_FILES="src/onedb/1.rb"
#-------------------------------------------------------------------------------
# Configuration files for OpenNebula, to be installed under $ETC_LOCATION
#-------------------------------------------------------------------------------
@ -849,6 +857,7 @@ MAN_FILES="share/man/oneauth.8.gz \
share/man/onevm.8.gz \
share/man/onevnet.8.gz \
share/man/onetemplate.8.gz \
share/man/onedb.8.gz \
share/man/econe-describe-images.8.gz \
share/man/econe-describe-instances.8.gz \
share/man/econe-register.8.gz \

src/cli/onedb (new executable file, 614 lines)

@ -0,0 +1,614 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- */
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
# not use this file except in compliance with the License. You may obtain */
# a copy of the License at */
# */
# http://www.apache.org/licenses/LICENSE-2.0 */
# */
# Unless required by applicable law or agreed to in writing, software */
# distributed under the License is distributed on an "AS IS" BASIS, */
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
# See the License for the specific language governing permissions and */
# limitations under the License. */
# -------------------------------------------------------------------------- */
# ----------------------------------------------------------------------------
# Set up the environment
# ----------------------------------------------------------------------------
ONE_LOCATION = ENV["ONE_LOCATION"]
if !ONE_LOCATION
LIB_LOCATION = "/usr/lib/one"
RUBY_LIB_LOCATION = LIB_LOCATION + "/ruby"
VAR_LOCATION = "/var/lib/one"
ETC_LOCATION = "/etc/one"
LOCK_FILE = "/var/lock/one/one"
else
LIB_LOCATION = ONE_LOCATION + "/lib"
RUBY_LIB_LOCATION = LIB_LOCATION + "/ruby"
VAR_LOCATION = ONE_LOCATION + "/var"
ETC_LOCATION = ONE_LOCATION + "/etc"
LOCK_FILE = VAR_LOCATION + "/.lock"
end
$: << RUBY_LIB_LOCATION
require 'OpenNebula'
require 'command_parse'
# TODO: Move the Configuration file to OpenNebula ruby lib?
require "#{RUBY_LIB_LOCATION}/cloud/Configuration"
require 'rubygems'
require 'sequel'
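# Base class for the migration scripts installed under $LIB_LOCATION/onedb.
# Each <N>.rb defines a Migrator subclass that sets @db_version and
# @one_version and implements up() to upgrade the schema from N-1 to N.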
class MigratorBase
attr_reader :db_version
attr_reader :one_version
@verbose
def initialize(db, verbose)
@db = db
@verbose = verbose
end
def up
puts "Method up not implemented for version #{@version}"
return false
end
end
class OneDBParse < CommandParse
COMMANDS_HELP=<<-EOT
DB Connection options:
By default, onedb reads the connection data from oned.conf
If any of these options is set, oned.conf is ignored (i.e. if you set MySQL's
port onedb won't look for the rest of the options in oned.conf)
Description:
This command enables the user to manage the OpenNebula database. It provides
information about the DB version, means to upgrade it to the latest version, and
backup tools.
Commands:
* upgrade (Upgrades the DB to the latest version)
onedb upgrade [<version>]
where <version> : DB version (e.g. 1, 3) to upgrade to. By default the DB is
upgraded to the latest version
* version (Prints the current DB version. Use -v flag to see also OpenNebula version)
onedb version
* history (Prints the upgrades history)
onedb history
* backup (Dumps the DB to a file)
onedb backup [<output_file>]
where <output_file> : Same as --backup
* restore (Restores the DB from a backup file. Only restores backups generated
from the same backend (SQLite or MySQL))
onedb restore [<backup_file>]
where <backup_file> : Same as --backup
EOT
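# Illustrative invocations (paths and output depend on the installation):
#
#   onedb version               # print the current DB version
#   onedb backup /tmp/one.sql   # dump the DB to the given file
#   onedb upgrade               # apply pending migrators, after a backup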
def text_commands
COMMANDS_HELP
end
def text_command_name
"onedb"
end
def special_options(opts, options)
opts.on_tail("-f", "--force", "Forces the backup even if the DB exists") do |o|
options[:force] = true
end
opts.on_tail("--backup file", "Use this file to store/read SQL dump", String) do |o|
options[:backup] = o
end
opts.on_tail("-s file", "--sqlite file", "SQLite DB file", String) do |o|
options[:backend] = :sqlite
options[:sqlite] = o
end
opts.on_tail("--server host", "MySQL server hostname or IP. Defaults "<<
"to localhost", String) do |o|
options[:backend] = :mysql
options[:server] = o
end
opts.on_tail("--port port", "MySQL server port. Defaults to 3306", Integer) do |o|
options[:backend] = :mysql
options[:port] = o
end
opts.on_tail("--user username", "MySQL username", String) do |o|
options[:backend] = :mysql
options[:user] = o
end
opts.on_tail("--passwd password", "MySQL password. Leave unset to be "<<
"prompted for it", String) do |o|
options[:backend] = :mysql
options[:passwd] = o
end
opts.on_tail("--dbname name", "MySQL DB name for OpenNebula", String) do |o|
options[:backend] = :mysql
options[:dbname] = o
end
end
end
################################################################################
# Helpers
################################################################################
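# Determines the DB backend and connection attributes, either from the
# command line options or, when none is set, from oned.conf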
def connection_params()
if( @ops[:backend] == nil )
read_onedconf()
else
@backend = @ops[:backend]
if( @backend == :sqlite )
@sqlite_file = @ops[:sqlite]
else
@server = @ops[:server]
@port = @ops[:port]
@user = @ops[:user]
@passwd = @ops[:passwd]
@db_name = @ops[:dbname]
# Check for errors:
error = false
missing = ""
(error = true; missing = "--user" ) if @user == nil
(error = true; missing = "--dbname") if @db_name == nil
if error
puts "MySQL option #{missing} is needed"
exit -1
end
# Check for defaults:
@server = "localhost" if @server == nil
@port = 0 if @port == nil
if @passwd == nil
# Hide input characters
`stty -echo`
print "MySQL Password: "
@passwd = STDIN.gets.strip
`stty echo`
puts ""
end
end
end
end
def read_onedconf()
config = Configuration.new("#{ETC_LOCATION}/oned.conf")
if config[:db] == nil
puts "No DB defined."
exit -1
end
if config[:db]["BACKEND"].upcase.include? "SQLITE"
@backend = :sqlite
@sqlite_file = "#{VAR_LOCATION}/one.db"
elsif config[:db]["BACKEND"].upcase.include? "MYSQL"
@backend = :mysql
@server = config[:db]["SERVER"]
@port = config[:db]["PORT"]
@user = config[:db]["USER"]
@passwd = config[:db]["PASSWD"]
@db_name = config[:db]["DB_NAME"]
# In OpenNebula 2.0 PORT wasn't present in oned.conf, set default
@port = "0" if @port == nil
# Check for errors:
error = false
missing = ""
(error = true; missing = "SERVER" ) if @server == nil
(error = true; missing = "USER" ) if @user == nil
(error = true; missing = "PASSWD" ) if @passwd == nil
(error = true; missing = "DB_NAME") if @db_name == nil
if error
puts "MySQL attribute #{missing} not found in " +
"#{ETC_LOCATION}/oned.conf"
exit -1
end
# Clean leading and trailing quotes, if any
@server = @server [1..-2] if @server [0] == ?"
@port = @port [1..-2] if @port [0] == ?"
@user = @user [1..-2] if @user [0] == ?"
@passwd = @passwd [1..-2] if @passwd [0] == ?"
@db_name = @db_name[1..-2] if @db_name[0] == ?"
else
puts "Could not load DB configuration from #{ETC_LOCATION}/oned.conf"
exit -1
end
end
def get_bck_file()
bck_file = ""
if( @ops[:backup] != nil )
bck_file = @ops[:backup]
elsif @backend == :sqlite
bck_file = "#{VAR_LOCATION}/one.db.bck"
elsif @backend == :mysql
bck_file = "#{VAR_LOCATION}/mysql_#{@server}_#{@db_name}.sql"
end
return bck_file
end
def backup_db()
bck_file = get_bck_file()
if( !@ops[:force] && File.exists?(bck_file) )
puts "File #{bck_file} exists, backup aborted. Use -f to overwrite."
exit -1
end
case @backend
when :sqlite
if( ! File.exists?(@sqlite_file) )
puts "File #{@sqlite_file} doesn't exist, backup aborted."
exit -1
end
FileUtils.cp(@sqlite_file, "#{bck_file}")
puts "Sqlite database backup stored in #{bck_file}"
puts "Use 'onedb restore' or copy the file back to restore the DB."
when :mysql
cmd = "mysqldump -u #{@user} -p#{@passwd} -h #{@server} " +
"-P #{@port} #{@db_name} > #{bck_file}"
rc = system(cmd)
if( !rc )
puts "Unknown error running '#{cmd}'"
exit -1
end
puts "MySQL dump stored in #{bck_file}"
puts "Use 'onedb restore' or restore the DB using the mysql command:"
puts "mysql -u user -h server -P port db_name < backup_file"
else
puts "Unknown DB #{@backend}"
exit -1
end
puts ""
end
def connect_db()
case @backend
when :sqlite
if( ! File.exists?(@sqlite_file) )
puts "File #{@sqlite_file} doesn't exist."
exit -1
end
@db = Sequel.sqlite(@sqlite_file)
when :mysql
@db = Sequel.connect(
"mysql://#{@user}:#{@passwd}@#{@server}:#{@port}/#{@db_name}")
else
puts "Unknown DB #{@backend}"
exit -1
end
end
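# Returns [version, timestamp, comment] for the latest db_versioning entry,
# or [0, 0, comment] for a 2.0/2.2 database that has no db_versioning table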
def read_db_version()
version = 0
timestamp = 0
comment = ""
@db.fetch("SELECT version, timestamp, comment FROM db_versioning " +
"WHERE oid=(SELECT MAX(oid) FROM db_versioning)") do |row|
version = row[:version]
timestamp = row[:timestamp]
comment = row[:comment]
end
return [version.to_i, timestamp, comment]
rescue
# If the DB doesn't have db_versioning table, it means it is empty or a 2.x
# OpenNebula DB
begin
# User with ID 0 (oneadmin) always exists
@db.fetch("SELECT * FROM user_pool WHERE oid=0") do |row|
end
rescue
puts "Database schema does not look to be created by OpenNebula:"
puts "table user_pool is missing or empty."
exit -1
end
begin
# Table image_pool is present only in 2.X DBs
@db.fetch("SELECT * FROM image_pool") do |row|
end
rescue
puts "Database schema looks to be created by OpenNebula 1.X."
puts "This tool only works with databases created by 2.X versions."
exit -1
end
comment = "Could not read any previous db_versioning data, assuming it is "+
"an OpenNebula 2.0 or 2.2 DB."
return [0, 0, comment]
end
def one_not_running()
if File.exists?(LOCK_FILE)
puts "First stop OpenNebula. Lock file found: #{LOCK_FILE}"
exit -1
end
end
################################################################################
################################################################################
onedb_opts = OneDBParse.new([])
onedb_opts.parse(ARGV)
@ops = onedb_opts.options
@verbose = @ops[:verbose]
command = ARGV.shift
case command
when "upgrade"
# Check opennebula is not running
one_not_running()
# Get DB connection parameters, from oned.conf or command arguments
connection_params()
# Connect to DB
connect_db()
# Read DB's version
version, timestamp, comment = read_db_version()
if( @verbose )
puts "Version read:"
puts "#{version} : #{comment}"
puts ""
end
# Upgrade, using the scripts in $LIB_LOCATION/onedb/xx.rb
max_version = nil
if( ARGV[0] )
max_version = ARGV[0].to_i
end
migrator_version = version + 1
migrator = nil
file = "#{LIB_LOCATION}/onedb/#{migrator_version}.rb"
if( File.exists?(file) &&
(max_version == nil || migrator_version <= max_version) )
# At least one upgrade will be executed, make DB backup
backup_db()
end
while( File.exists?(file) &&
(max_version == nil || migrator_version <= max_version) )
puts " > Running migrator #{file}" if @verbose
load(file)
migrator = Migrator.new(@db, @verbose)
result = migrator.up
if( !result )
puts "Error while upgrading from #{migrator_version-1} to #{migrator.db_version}"
exit -1
end
puts " > Done" if @verbose
puts "" if @verbose
migrator_version += 1
file = "#{LIB_LOCATION}/onedb/#{migrator_version}.rb"
end
# Modify db_versioning table
if( migrator != nil )
comment = "Database migrated from #{version} to #{migrator.db_version}"+
" (#{migrator.one_version}) by onedb command."
max_oid = nil
@db.fetch("SELECT MAX(oid) FROM db_versioning") do |row|
max_oid = row[:"MAX(oid)"].to_i
end
max_oid = 0 if max_oid == nil
@db.run "INSERT INTO db_versioning (oid, version, timestamp, comment) "+
"VALUES (" +
"#{max_oid+1}, " +
"'#{migrator.db_version}', " +
"#{Time.new.to_i}, " +
"'#{comment}')"
puts comment
else
puts "Database already uses version #{version}"
end
when "version"
connection_params()
connect_db()
version, timestamp, comment = read_db_version()
if(@verbose)
puts "Version: #{version}"
time = version == 0 ? Time.now : Time.at(timestamp)
# TODO: UTC or Local time?
puts "Timestamp: #{time.getgm.strftime("%b %d, %Y %H:%M")}"
puts "Comment: #{comment}"
else
puts version
end
when "history"
connection_params()
connect_db()
begin
@db.fetch("SELECT version, timestamp, comment FROM db_versioning") do |row|
puts "Version: #{row[:version]}"
time = row[:version] == 0 ? Time.now : Time.at(row[:timestamp])
# TODO: UTC or Local time?
puts "Timestamp: #{time.getgm.strftime("%b %d, %Y %H:%M")}"
puts "Comment: #{row[:comment]}"
puts ""
end
rescue Exception => e
puts "No version records found. Error message:"
puts e.message
end
when "backup"
if( ARGV[0] != nil )
@ops[:backup] = ARGV[0]
end
connection_params()
backup_db()
when "restore"
if( ARGV[0] != nil )
@ops[:backup] = ARGV[0]
end
connection_params()
# Source sql dump file
bck_file = get_bck_file()
if( ! File.exists?(bck_file) )
puts "File #{bck_file} doesn't exist, backup restoration aborted."
exit -1
end
one_not_running()
case @backend
when :sqlite
if( !@ops[:force] && File.exists?(@sqlite_file) )
puts "File #{@sqlite_file} exists, use -f to overwrite."
exit -1
end
FileUtils.cp(bck_file, @sqlite_file)
puts "Sqlite database backup restored in #{@sqlite_file}"
when :mysql
connect_db()
# Check if target database exists
exists = false
begin
# User with ID 0 (oneadmin) always exists
@db.fetch("SELECT * FROM user_pool WHERE oid=0") do |row|
end
exists = true
rescue
end
if( !@ops[:force] && exists )
puts "MySQL database #{@db_name} at #{@server} exists, use -f to overwrite."
exit -1
end
mysql_cmd = "mysql -u #{@user} -p#{@passwd} -h #{@server} " +
"-P #{@port} "
rc = system( mysql_cmd + "-e 'DROP DATABASE IF EXISTS #{@db_name};'")
if( !rc )
puts "Error dropping MySQL DB #{@db_name} at #{@server}."
exit -1
end
rc = system( mysql_cmd + "-e 'CREATE DATABASE IF NOT EXISTS #{@db_name};'")
if( !rc )
puts "Error creating MySQL DB #{@db_name} at #{@server}."
exit -1
end
rc = system( mysql_cmd + "#{@db_name} < #{bck_file}")
if( !rc )
puts "Error while restoring MySQL DB #{@db_name} at #{@server}."
exit -1
end
puts "MySQL DB #{@db_name} at #{@server} restored."
else
puts "Unknown DB #{@backend}"
exit -1
end
else
onedb_opts.print_help
exit -1
end
exit 0


@ -227,15 +227,27 @@ void Nebula::start()
}
}
NebulaLog::log("ONE",Log::INFO,"Bootstraping OpenNebula database.");
NebulaLog::log("ONE",Log::INFO,"Checking database version.");
rc = check_db_version();
VirtualMachinePool::bootstrap(db);
HostPool::bootstrap(db);
VirtualNetworkPool::bootstrap(db);
UserPool::bootstrap(db);
ImagePool::bootstrap(db);
ClusterPool::bootstrap(db);
VMTemplatePool::bootstrap(db);
if( rc == -1 )
{
throw runtime_error("Database version mismatch.");
}
if( rc == -2 )
{
NebulaLog::log("ONE",Log::INFO,"Bootstraping OpenNebula database.");
bootstrap();
VirtualMachinePool::bootstrap(db);
HostPool::bootstrap(db);
VirtualNetworkPool::bootstrap(db);
UserPool::bootstrap(db);
ImagePool::bootstrap(db);
ClusterPool::bootstrap(db);
VMTemplatePool::bootstrap(db);
}
}
catch (exception&)
{
@ -594,3 +606,99 @@ void Nebula::start()
NebulaLog::log("ONE", Log::INFO, "All modules finalized, exiting.\n");
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Nebula::bootstrap()
{
ostringstream oss;
oss << "CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, "
"last_oid BIGINT UNSIGNED)";
db->exec(oss);
oss.str("");
oss << "CREATE TABLE db_versioning (oid INTEGER PRIMARY KEY, "
"version INTEGER, timestamp INTEGER, comment VARCHAR(256))";
db->exec(oss);
oss.str("");
oss << "INSERT INTO db_versioning (oid, version, timestamp, comment) "
<< "VALUES (0, " << db_version() << ", " << time(0)
<< ", '" << version() << " daemon bootstrap')";
db->exec(oss);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Nebula::check_db_version()
{
int rc;
ostringstream oss;
int loaded_db_version = 0;
// Try to read latest version
set_callback( static_cast<Callbackable::Callback>(&Nebula::select_cb),
static_cast<void *>(&loaded_db_version) );
oss << "SELECT version FROM db_versioning "
<< "WHERE oid=(SELECT MAX(oid) FROM db_versioning)";
db->exec(oss, this);
oss.str("");
unset_callback();
if( loaded_db_version == 0 )
{
// Table user_pool is present for all OpenNebula versions, and it
// always contains at least the oneadmin user.
oss << "SELECT MAX(oid) FROM user_pool";
rc = db->exec(oss);
oss.str("");
if( rc != 0 ) // Database needs bootstrap
{
return -2;
}
}
if( db_version() != loaded_db_version )
{
oss << "Database version mismatch. "
<< "Installed " << version() << " uses DB version '" << db_version()
<< "', and existing DB version is '"
<< loaded_db_version << "'.";
NebulaLog::log("ONE",Log::ERROR,oss);
return -1;
}
return 0;
}
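/* -------------------------------------------------------------------------- */
/* Callback for check_db_version(): stores the 'version' column read from     */
/* db_versioning into *_loaded_db_version (left at 0 if no row is returned)   */
/* -------------------------------------------------------------------------- */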
int Nebula::select_cb(void *_loaded_db_version, int num, char **values,
char **names)
{
istringstream iss;
int * loaded_db_version;
loaded_db_version = static_cast<int *>(_loaded_db_version);
*loaded_db_version = 0;
if ( (values[0]) && (num == 1) )
{
iss.str(values[0]);
iss >> *loaded_db_version;
}
return 0;
};

src/onedb/1.rb (new file, 281 lines)

@ -0,0 +1,281 @@
# -------------------------------------------------------------------------- */
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
# not use this file except in compliance with the License. You may obtain */
# a copy of the License at */
# */
# http://www.apache.org/licenses/LICENSE-2.0 */
# */
# Unless required by applicable law or agreed to in writing, software */
# distributed under the License is distributed on an "AS IS" BASIS, */
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
# See the License for the specific language governing permissions and */
# limitations under the License. */
# -------------------------------------------------------------------------- */
class Migrator < MigratorBase
def initialize(db, verbose)
super(db, verbose)
@db_version = 1
@one_version = "OpenNebula 2.3.0"
end
def up
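# Every pool follows the same pattern: rename the 2.2 table, create the
# new-style table (oid, name, body, ...), rebuild each row as an XML body
# and drop the renamed table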
########################################################################
# Users
########################################################################
# 2.2 Schema
# CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, user_name VARCHAR(256), password TEXT,enabled INTEGER, UNIQUE(user_name));
# Move table user_pool
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
# Create new user_pool
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, UNIQUE(name));"
# Read each entry in the old user_pool, and insert into new user_pool
@db.fetch("SELECT * FROM old_user_pool") do |row|
oid = row[:oid]
name = row[:user_name]
body = "<USER><ID>#{oid}</ID><NAME>#{name}</NAME><PASSWORD>#{row[:password]}</PASSWORD><ENABLED>#{row[:enabled]}</ENABLED></USER>"
@db.run "INSERT INTO user_pool VALUES(#{oid},'#{name}','#{body}');"
end
# Delete old user_pool
@db.run "DROP TABLE old_user_pool"
########################################################################
# Clusters
########################################################################
# 2.2 Schema
# CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, cluster_name VARCHAR(128), UNIQUE(cluster_name) );
# Move table
@db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;"
# Create new table
@db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, UNIQUE(name));"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_cluster_pool") do |row|
oid = row[:oid]
name = row[:cluster_name]
body = "<CLUSTER><ID>#{oid}</ID><NAME>#{name}</NAME></CLUSTER>"
@db.run "INSERT INTO cluster_pool VALUES(#{oid},'#{name}','#{body}');"
end
# Delete old table
@db.run "DROP TABLE old_cluster_pool"
########################################################################
# Hosts
########################################################################
# 2.2 Schema
# CREATE TABLE host_pool (oid INTEGER PRIMARY KEY,host_name VARCHAR(256), state INTEGER,im_mad VARCHAR(128),vm_mad VARCHAR(128),tm_mad VARCHAR(128),last_mon_time INTEGER, cluster VARCHAR(128), template TEXT, UNIQUE(host_name));
# CREATE TABLE host_shares(hid INTEGER PRIMARY KEY,disk_usage INTEGER, mem_usage INTEGER, cpu_usage INTEGER,max_disk INTEGER, max_mem INTEGER, max_cpu INTEGER,free_disk INTEGER, free_mem INTEGER, free_cpu INTEGER,used_disk INTEGER, used_mem INTEGER, used_cpu INTEGER,running_vms INTEGER);
# Move table
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
# Create new table
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, state INTEGER, last_mon_time INTEGER, cluster VARCHAR(128), UNIQUE(name));"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_host_pool") do |row|
oid = row[:oid]
name = row[:host_name]
state = row[:state]
last_mon_time = row[:last_mon_time]
cluster = row[:cluster]
# There is one host share for each host
host_share = ""
@db.fetch("SELECT * FROM host_shares WHERE hid=#{oid}") do |share|
host_share = "<HOST_SHARE><DISK_USAGE>#{share[:disk_usage]}</DISK_USAGE><MEM_USAGE>#{share[:mem_usage]}</MEM_USAGE><CPU_USAGE>#{share[:cpu_usage]}</CPU_USAGE><MAX_DISK>#{share[:max_disk]}</MAX_DISK><MAX_MEM>#{share[:max_mem]}</MAX_MEM><MAX_CPU>#{share[:max_cpu]}</MAX_CPU><FREE_DISK>#{share[:free_disk]}</FREE_DISK><FREE_MEM>#{share[:free_mem]}</FREE_MEM><FREE_CPU>#{share[:free_cpu]}</FREE_CPU><USED_DISK>#{share[:used_disk]}</USED_DISK><USED_MEM>#{share[:used_mem]}</USED_MEM><USED_CPU>#{share[:used_cpu]}</USED_CPU><RUNNING_VMS>#{share[:running_vms]}</RUNNING_VMS></HOST_SHARE>"
end
body = "<HOST><ID>#{oid}</ID><NAME>#{name}</NAME><STATE>#{state}</STATE><IM_MAD>#{row[:im_mad]}</IM_MAD><VM_MAD>#{row[:vm_mad]}</VM_MAD><TM_MAD>#{row[:tm_mad]}</TM_MAD><LAST_MON_TIME>#{last_mon_time}</LAST_MON_TIME><CLUSTER>#{cluster}</CLUSTER>#{host_share}#{row[:template]}</HOST>"
@db.run "INSERT INTO host_pool VALUES(#{oid},'#{name}','#{body}', #{state}, #{last_mon_time}, '#{cluster}');"
end
# Delete old table
@db.run "DROP TABLE old_host_pool"
@db.run "DROP TABLE host_shares"
########################################################################
# Images
########################################################################
# 2.2 Schema
# CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, uid INTEGER, name VARCHAR(128), type INTEGER, public INTEGER, persistent INTEGER, regtime INTEGER, source TEXT, state INTEGER, running_vms INTEGER, template TEXT, UNIQUE(name) );
# Move table
@db.run "ALTER TABLE image_pool RENAME TO old_image_pool;"
# Create new table
@db.run "CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, uid INTEGER, public INTEGER, UNIQUE(name,uid) );"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_image_pool") do |row|
oid = row[:oid]
name = row[:name]
uid = row[:uid]
public = row[:public]
username = get_username(uid)
# In OpenNebula 2.0 Image States go from 0 to 3, in 3.0 go
# from 0 to 5, but the meaning is the same for states 0 to 3
body = "<IMAGE><ID>#{oid}</ID><UID>#{row[:uid]}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><TYPE>#{row[:type]}</TYPE><PUBLIC>#{public}</PUBLIC><PERSISTENT>#{row[:persistent]}</PERSISTENT><REGTIME>#{row[:regtime]}</REGTIME><SOURCE>#{row[:source]}</SOURCE><STATE>#{row[:state]}</STATE><RUNNING_VMS>#{row[:running_vms]}</RUNNING_VMS>#{row[:template]}</IMAGE>"
@db.run "INSERT INTO image_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{public});"
end
# Delete old table
@db.run "DROP TABLE old_image_pool"
########################################################################
# VMs
########################################################################
# 2.2 Schema
# CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY,uid INTEGER,name TEXT,last_poll INTEGER, state INTEGER,lcm_state INTEGER,stime INTEGER,etime INTEGER,deploy_id TEXT,memory INTEGER,cpu INTEGER,net_tx INTEGER,net_rx INTEGER, last_seq INTEGER, template TEXT);
# CREATE TABLE history (vid INTEGER,seq INTEGER,host_name TEXT,vm_dir TEXT,hid INTEGER,vm_mad TEXT,tm_mad TEXT,stime INTEGER,etime INTEGER,pstime INTEGER,petime INTEGER,rstime INTEGER,retime INTEGER,estime INTEGER,eetime INTEGER,reason INTEGER,PRIMARY KEY(vid,seq));
# Move tables
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
@db.run "ALTER TABLE history RENAME TO old_history;"
# Create new tables
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name TEXT, body TEXT, uid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER);"
@db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body TEXT, PRIMARY KEY(vid,seq));"
# Read each entry in the old history table, and insert into new table
@db.fetch("SELECT * FROM old_history") do |row|
vid = row[:vid]
seq = row[:seq]
body = "<HISTORY><SEQ>#{seq}</SEQ><HOSTNAME>#{row[:host_name]}</HOSTNAME><VM_DIR>#{row[:vm_dir]}</VM_DIR><HID>#{row[:hid]}</HID><STIME>#{row[:stime]}</STIME><ETIME>#{row[:etime]}</ETIME><VMMMAD>#{row[:vm_mad]}</VMMMAD><TMMAD>#{row[:tm_mad]}</TMMAD><PSTIME>#{row[:pstime]}</PSTIME><PETIME>#{row[:petime]}</PETIME><RSTIME>#{row[:rstime]}</RSTIME><RETIME>#{row[:retime]}</RETIME><ESTIME>#{row[:estime]}</ESTIME><EETIME>#{row[:eetime]}</EETIME><REASON>#{row[:reason]}</REASON></HISTORY>"
@db.run "INSERT INTO history VALUES(#{vid},'#{seq}','#{body}');"
end
# Read each entry in the old vm table, and insert into new table
@db.fetch("SELECT * FROM old_vm_pool") do |row|
oid = row[:oid]
name = row[:name]
uid = row[:uid]
last_poll = row[:last_poll]
state = row[:state]
lcm_state = row[:lcm_state]
username = get_username(uid)
# If the VM has History items, the last one is included in the XML
history = ""
@db.fetch("SELECT body FROM history WHERE vid=#{oid} AND seq=(SELECT MAX(seq) FROM history WHERE vid=#{oid})") do |history_row|
history = history_row[:body]
end
body = "<VM><ID>#{oid}</ID><UID>#{uid}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><LAST_POLL>#{last_poll}</LAST_POLL><STATE>#{state}</STATE><LCM_STATE>#{lcm_state}</LCM_STATE><STIME>#{row[:stime]}</STIME><ETIME>#{row[:etime]}</ETIME><DEPLOY_ID>#{row[:deploy_id]}</DEPLOY_ID><MEMORY>#{row[:memory]}</MEMORY><CPU>#{row[:cpu]}</CPU><NET_TX>#{row[:net_tx]}</NET_TX><NET_RX>#{row[:net_rx]}</NET_RX>#{row[:template]}#{history}</VM>"
@db.run "INSERT INTO vm_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{last_poll}, #{state}, #{lcm_state});"
end
# Delete old tables
@db.run "DROP TABLE old_vm_pool"
@db.run "DROP TABLE old_history"
########################################################################
# Virtual Networks
########################################################################
# 2.2 Schema
# CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, uid INTEGER, name VARCHAR(256), type INTEGER, bridge TEXT, public INTEGER, template TEXT, UNIQUE(name));
# CREATE TABLE leases (oid INTEGER, ip BIGINT, mac_prefix BIGINT, mac_suffix BIGINT,vid INTEGER, used INTEGER, PRIMARY KEY(oid,ip));
# Move tables
@db.run "ALTER TABLE network_pool RENAME TO old_network_pool;"
@db.run "ALTER TABLE leases RENAME TO old_leases;"
# Create new tables
@db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, public INTEGER, UNIQUE(name,uid));"
@db.run "CREATE TABLE leases (oid INTEGER, ip BIGINT, body TEXT, PRIMARY KEY(oid,ip));"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_network_pool") do |row|
oid = row[:oid]
name = row[:name]
uid = row[:uid]
public = row[:public]
username = get_username(uid)
# <TOTAL_LEASES> is stored in the DB, but it is not used to rebuild
# the VirtualNetwork object, and it is generated each time the
# network is listed. So setting it to 0 is safe
body = "<VNET><ID>#{oid}</ID><UID>#{uid}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><TYPE>#{row[:type]}</TYPE><BRIDGE>#{row[:bridge]}</BRIDGE><PUBLIC>#{public}</PUBLIC><TOTAL_LEASES>0</TOTAL_LEASES>#{row[:template]}</VNET>"
@db.run "INSERT INTO network_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{public});"
end
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_leases") do |row|
oid = row[:oid]
ip = row[:ip]
body = "<LEASE><IP>#{ip}</IP><MAC_PREFIX>#{row[:mac_prefix]}</MAC_PREFIX><MAC_SUFFIX>#{row[:mac_suffix]}</MAC_SUFFIX><USED>#{row[:used]}</USED><VID>#{row[:vid]}</VID></LEASE>"
@db.run "INSERT INTO leases VALUES(#{oid}, #{ip}, '#{body}');"
end
# Delete old tables
@db.run "DROP TABLE old_network_pool"
@db.run "DROP TABLE old_leases"
########################################################################
# New tables in DB version 1
########################################################################
@db.run "CREATE TABLE db_versioning (oid INTEGER PRIMARY KEY, version INTEGER, timestamp INTEGER, comment VARCHAR(256));"
@db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, uid INTEGER, public INTEGER);"
# New pool_control table contains the last_oid used, must be rebuilt
@db.run "CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, last_oid BIGINT UNSIGNED)"
for table in ["user_pool", "cluster_pool", "host_pool", "image_pool", "vm_pool", "network_pool"] do
@db.fetch("SELECT MAX(oid) FROM #{table}") do |row|
if( row[:"MAX(oid)"] != nil )
@db.run "INSERT INTO pool_control (tablename, last_oid) VALUES ('#{table}', #{row[:"MAX(oid)"]});"
end
end
end
return true
end
def get_username(uid)
username = ""
@db.fetch("SELECT name FROM user_pool WHERE oid=#{uid}") do |user|
username = user[:name]
end
return username
end
end


@ -50,7 +50,8 @@ int PoolSQL::init_cb(void *nil, int num, char **values, char **names)
/* -------------------------------------------------------------------------- */
-PoolSQL::PoolSQL(SqlDB * _db, const char * table): db(_db), lastOID(-1)
+PoolSQL::PoolSQL(SqlDB * _db, const char * _table):
+db(_db), lastOID(-1), table(_table)
{
ostringstream oss;
@ -58,7 +59,7 @@ PoolSQL::PoolSQL(SqlDB * _db, const char * table): db(_db), lastOID(-1)
set_callback(static_cast<Callbackable::Callback>(&PoolSQL::init_cb));
oss << "SELECT MAX(oid) FROM " << table;
oss << "SELECT last_oid FROM pool_control WHERE tablename='" << table <<"'";
db->exec(oss,this);
@ -129,11 +130,29 @@ int PoolSQL::allocate(
delete objsql;
if( rc != -1 )
{
update_lastOID();
}
unlock();
return rc;
}
void PoolSQL::update_lastOID()
{
// db->escape_str is not used for 'table' since its name can't be set in
// any way by the user, it is hardcoded.
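// REPLACE INTO either inserts the row for this table or overwrites the
// last_oid already stored for it.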
ostringstream oss;
oss << "REPLACE INTO pool_control (tablename, last_oid) VALUES ("
<< "'" << table << "',"
<< lastOID << ")";
db->exec(oss);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */