1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-03-25 02:50:08 +03:00

Merge branch 'master' into feature-407

Conflicts:
	install.sh
	src/nebula/Nebula.cc
This commit is contained in:
Carlos Martín 2011-05-12 17:28:03 +02:00
commit e9b0bf3eea
25 changed files with 1358 additions and 111 deletions

View File

@ -9,7 +9,7 @@ distributed data center infrastructures.
Complete documentation can be found at
http://opennebula.org/documentation:rel2.0
http://opennebula.org/documentation:rel2.4
## INSTALLATION
@ -111,7 +111,7 @@ where **install_options** can be one or more of:
## CONFIGURATION
Information on how to configure OpenNebula is located at http://opennebula.org/documentation:rel2.0
Information on how to configure OpenNebula is located at http://opennebula.org/documentation:rel2.4
## CONTACT

View File

@ -38,7 +38,9 @@
#include "AuthManager.h"
#include "ImageManager.h"
class Nebula
#include "Callbackable.h"
class Nebula : public Callbackable
{
public:
@ -228,6 +230,11 @@ public:
return "OpenNebula 2.3.0";
};
/**
 * Returns the version of the DB schema this daemon was built for; compared
 * against the db_versioning table by check_db_version()
 * @return the DB schema version number
 */
static int db_version()
{
return 1;
};
void start();
void get_configuration_attribute(
@ -436,6 +443,28 @@ private:
// ---------------------------------------------------------------
friend void nebula_signal_handler (int sig);
/**
* Bootstraps the database control tables
*/
void bootstrap();
/**
* Callback function for check_db_version(); parses the version read from
* the db_versioning table
* @param _loaded_db_version pointer to an int where the DB version is stored
* @param num the number of columns read from the DB
* @param names the column names
* @param values the column values
* @return 0 on success
*/
int select_cb(void *_loaded_db_version, int num, char **values,
char **names);
/**
* Compares the DB schema version stored in the db_versioning table with
* the one this daemon was built for
* @return 0 ok, -1 version mismatch, -2 needs bootstrap
*/
int check_db_version();
};
#endif /*NEBULA_H_*/

View File

@ -47,7 +47,7 @@ public:
* counter). If null the OID counter is not updated.
* @param with_uid the Pool objects have an owner id (uid)
*/
PoolSQL(SqlDB * _db, const char * table);
PoolSQL(SqlDB * _db, const char * _table);
virtual ~PoolSQL();
@ -189,6 +189,11 @@ private:
*/
int lastOID;
/**
* Tablename for this pool
*/
string table;
/**
* The pool is implemented with a Map of SQL object pointers, using the
* OID as key.
@ -253,6 +258,11 @@ private:
return key.str();
};
/**
* Inserts the last oid into the pool_control table
*/
void update_lastOID();
/* ---------------------------------------------------------------------- */
/* ---------------------------------------------------------------------- */

View File

@ -181,6 +181,7 @@ LIB_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/cloud/econe \
$LIB_LOCATION/ruby/cloud/econe/views \
$LIB_LOCATION/ruby/cloud/occi \
$LIB_LOCATION/onedb \
$LIB_LOCATION/tm_commands \
$LIB_LOCATION/tm_commands/nfs \
$LIB_LOCATION/tm_commands/ssh \
@ -258,9 +259,13 @@ INSTALL_FILES=(
LIB_FILES:$LIB_LOCATION
RUBY_LIB_FILES:$LIB_LOCATION/ruby
RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/OpenNebula
MAD_RUBY_LIB_FILES:$LIB_LOCATION/ruby
MAD_RUBY_LIB_FILES:$LIB_LOCATION/remotes
MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes
MAD_SH_LIB_FILES:$LIB_LOCATION/sh
MAD_SH_LIB_FILES:$LIB_LOCATION/remotes
MAD_SH_LIB_FILES:$VAR_LOCATION/remotes
ONEDB_MIGRATOR_FILES:$LIB_LOCATION/onedb
MADS_LIB_FILES:$LIB_LOCATION/mads
IM_PROBES_FILES:$VAR_LOCATION/remotes/im
IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
@ -366,6 +371,7 @@ BIN_FILES="src/nebula/oned \
src/cli/onecluster \
src/cli/onetemplate \
src/cli/onegroup \
src/cli/onedb \
share/scripts/one \
src/authm_mad/oneauth"
@ -418,11 +424,12 @@ RUBY_OPENNEBULA_LIB_FILES="src/oca/ruby/OpenNebula/Host.rb \
#-----------------------------------------------------------------------------
# MAD ShellScript library files, to be installed under $LIB_LOCATION/sh
# MAD Script library files, to be installed under $LIB_LOCATION/<script lang>
# and remotes directory
#-----------------------------------------------------------------------------
MAD_SH_LIB_FILES="src/mad/sh/scripts_common.sh"
MAD_RUBY_LIB_FILES="src/mad/ruby/scripts_common.rb"
#-------------------------------------------------------------------------------
# Driver executable files, to be installed under $LIB_LOCATION/mads
@ -549,6 +556,11 @@ IMAGE_DRIVER_FS_SCRIPTS="src/image_mad/remotes/fs/cp \
src/image_mad/remotes/fs/fsrc \
src/image_mad/remotes/fs/rm"
#-------------------------------------------------------------------------------
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
#-------------------------------------------------------------------------------
ONEDB_MIGRATOR_FILES="src/onedb/1.rb"
#-------------------------------------------------------------------------------
# Configuration files for OpenNebula, to be installed under $ETC_LOCATION
#-------------------------------------------------------------------------------
@ -758,6 +770,7 @@ SUNSTONE_MODELS_JSON_FILES="src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb \
src/sunstone/models/OpenNebulaJSON/PoolJSON.rb \
src/sunstone/models/OpenNebulaJSON/UserJSON.rb \
src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb \
src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb \
src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb"
SUNSTONE_TEMPLATE_FILES="src/sunstone/templates/index.html \
@ -773,6 +786,7 @@ SUNSTONE_PUBLIC_JS_PLUGINS_FILES="\
src/sunstone/public/js/plugins/dashboard-tab.js \
src/sunstone/public/js/plugins/hosts-tab.js \
src/sunstone/public/js/plugins/images-tab.js \
src/sunstone/public/js/plugins/templates-tab.js \
src/sunstone/public/js/plugins/users-tab.js \
src/sunstone/public/js/plugins/vms-tab.js \
src/sunstone/public/js/plugins/vnets-tab.js"
@ -848,6 +862,7 @@ MAN_FILES="share/man/oneauth.8.gz \
share/man/onevnet.8.gz \
share/man/onetemplate.8.gz \
share/man/onegroup.8.gz \
share/man/onedb.8.gz \
share/man/econe-describe-images.8.gz \
share/man/econe-describe-instances.8.gz \
share/man/econe-register.8.gz \

614
src/cli/onedb Executable file
View File

@ -0,0 +1,614 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- */
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
# not use this file except in compliance with the License. You may obtain */
# a copy of the License at */
# */
# http://www.apache.org/licenses/LICENSE-2.0 */
# */
# Unless required by applicable law or agreed to in writing, software */
# distributed under the License is distributed on an "AS IS" BASIS, */
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
# See the License for the specific language governing permissions and */
# limitations under the License. */
# -------------------------------------------------------------------------- */
# ----------------------------------------------------------------------------
# Set up the environment
# ----------------------------------------------------------------------------
# Resolve installation paths: a self-contained install under $ONE_LOCATION,
# or the system-wide FHS layout when the variable is unset.
ONE_LOCATION = ENV["ONE_LOCATION"]

if !ONE_LOCATION
    LIB_LOCATION      = "/usr/lib/one"
    RUBY_LIB_LOCATION = LIB_LOCATION + "/ruby"
    VAR_LOCATION      = "/var/lib/one"
    ETC_LOCATION      = "/etc/one"
    LOCK_FILE         = "/var/lock/one/one"
else
    LIB_LOCATION      = ONE_LOCATION + "/lib"
    RUBY_LIB_LOCATION = LIB_LOCATION + "/ruby"
    VAR_LOCATION      = ONE_LOCATION + "/var"
    ETC_LOCATION      = ONE_LOCATION + "/etc"
    LOCK_FILE         = VAR_LOCATION + "/.lock"
end

$: << RUBY_LIB_LOCATION

require 'OpenNebula'
require 'command_parse'

# TODO: Move the Configuration file to OpenNebula ruby lib?
require "#{RUBY_LIB_LOCATION}/cloud/Configuration"

# Bug fix: FileUtils.cp is used by backup_db() and the restore command, but
# fileutils was never required explicitly (it was only loaded as a side
# effect of other requires). Require it here to be safe.
require 'fileutils'

require 'rubygems'
require 'sequel'
# Common base class for the per-version DB migrators loaded from
# $LIB_LOCATION/onedb/<version>.rb. Subclasses must set @db_version and
# @one_version in their initializer and override #up.
class MigratorBase
    # Schema version this migrator produces (set by subclasses)
    attr_reader :db_version
    # OpenNebula release that introduced that schema (set by subclasses)
    attr_reader :one_version

    # @param db      [Sequel::Database] open database connection
    # @param verbose [Boolean] whether to print progress messages
    def initialize(db, verbose)
        @db      = db
        @verbose = verbose
    end

    # Default upgrade implementation: reports that the subclass forgot to
    # override it and signals failure to the caller.
    # @return [false]
    def up
        # Bug fix: the message interpolated the undefined ivar @version;
        # the attribute is named @db_version
        puts "Method up not implemented for version #{@db_version}"
        return false
    end
end
# Command-line parser for onedb, built on the project's CommandParse base
# class. Provides the help text and the DB-connection command line options.
class OneDBParse < CommandParse
# Help text printed by the base class; the heredoc body is user-facing
# output and must not be changed.
COMMANDS_HELP=<<-EOT
DB Connection options:
By default, onedb reads the connection data from oned.conf
If any of these options is set, oned.conf is ignored (i.e. if you set MySQL's
port onedb won't look for the rest of the options in oned.conf)
Description:
This command enables the user to manage the OpenNebula database. It provides
information about the DB version, means to upgrade it to the latest version, and
backup tools.
Commands:
* upgrade (Upgrades the DB to the latest version)
onedb upgrade [<version>]
where <version> : DB version (e.g. 1, 3) to upgrade. By default the DB is
upgraded to the latest version
* version (Prints the current DB version. Use -v flag to see also OpenNebula version)
onedb version
* history (Prints the upgrades history)
onedb history
* backup (Dumps the DB to a file)
onedb backup [<output_file>]
where <output_file> : Same as --backup
* restore (Restores the DB from a backup file. Only restores backups generated
from the same backend (SQLite or MySQL))
onedb restore [<backup_file>]
where <backup_file> : Same as --backup
EOT
# Commands section of the help text (CommandParse hook)
def text_commands
COMMANDS_HELP
end
# Name of the executable shown in usage messages (CommandParse hook)
def text_command_name
"onedb"
end
# Registers the onedb-specific options (CommandParse hook). Setting any
# MySQL/SQLite option also fixes options[:backend], which makes
# connection_params() skip oned.conf entirely.
def special_options(opts, options)
opts.on_tail("-f", "--force", "Forces the backup even if the DB exists") do |o|
options[:force] = true
end
opts.on_tail("--backup file", "Use this file to store/read SQL dump", String) do |o|
options[:backup] = o
end
opts.on_tail("-s file", "--sqlite file", "SQLite DB file", String) do |o|
options[:backend] = :sqlite
options[:sqlite] = o
end
opts.on_tail("--server host", "MySQL server hostname or IP. Defaults "<<
"to localhost", String) do |o|
options[:backend] = :mysql
options[:server] = o
end
opts.on_tail("--port port", "MySQL server port. Defaults to 3306", Integer) do |o|
options[:backend] = :mysql
options[:port] = o
end
opts.on_tail("--user username", "MySQL username", String) do |o|
options[:backend] = :mysql
options[:user] = o
end
opts.on_tail("--passwd password", "MySQL password. Leave unset to be "<<
"prompted for it", String) do |o|
options[:backend] = :mysql
options[:passwd] = o
end
opts.on_tail("--dbname name", "MySQL DB name for OpenNebula", String) do |o|
options[:backend] = :mysql
options[:dbname] = o
end
end
end
################################################################################
# Helpers
################################################################################
# Fills in the DB connection ivars (@backend, and either @sqlite_file or
# @server/@port/@user/@passwd/@db_name). If no backend was given on the
# command line it falls back to parsing oned.conf; otherwise oned.conf is
# ignored. Exits with -1 when mandatory MySQL options are missing. May
# prompt interactively (with echo disabled via stty) for the MySQL password.
def connection_params()
if( @ops[:backend] == nil )
read_onedconf()
else
@backend = @ops[:backend]
if( @backend == :sqlite )
@sqlite_file = @ops[:sqlite]
else
# MySQL backend: copy connection data from the parsed options
@server = @ops[:server]
@port = @ops[:port]
@user = @ops[:user]
@passwd = @ops[:passwd]
@db_name = @ops[:dbname]
# Check for errors:
error = false
missing = ""
(error = true; missing = "--user" ) if @user == nil
(error = true; missing = "--dbname") if @db_name == nil
if error
puts "MySQL option #{missing} is needed"
exit -1
end
# Check for defaults:
@server = "localhost" if @server == nil
@port = 0 if @port == nil
if @passwd == nil
# Hide input characters
`stty -echo`
print "MySQL Password: "
@passwd = STDIN.gets.strip
`stty echo`
puts ""
end
end
end
end
# Reads the DB section of oned.conf and fills in the same connection ivars
# as connection_params(). Supports the SQLITE and MYSQL backends; exits
# with -1 when the DB section is missing, incomplete or of an unknown
# backend type.
def read_onedconf()
config = Configuration.new("#{ETC_LOCATION}/oned.conf")
if config[:db] == nil
puts "No DB defined."
exit -1
end
if config[:db]["BACKEND"].upcase.include? "SQLITE"
@backend = :sqlite
# SQLite file path is fixed by the installation layout
@sqlite_file = "#{VAR_LOCATION}/one.db"
elsif config[:db]["BACKEND"].upcase.include? "MYSQL"
@backend = :mysql
@server = config[:db]["SERVER"]
@port = config[:db]["PORT"]
@user = config[:db]["USER"]
@passwd = config[:db]["PASSWD"]
@db_name = config[:db]["DB_NAME"]
# In OpenNebula 2.0 PORT wasn't present in oned.conf, set default
@port = "0" if @port == nil
# Check for errors:
error = false
missing = ""
(error = true; missing = "SERVER" ) if @server == nil
(error = true; missing = "USER" ) if @user == nil
(error = true; missing = "PASSWD" ) if @passwd == nil
(error = true; missing = "DB_NAME") if @db_name == nil
if error
puts "MySQL attribute #{missing} not found in " +
"#{ETC_LOCATION}/oned.conf"
exit -1
end
# Clean leading and trailing quotes, if any
@server = @server [1..-2] if @server [0] == ?"
@port = @port [1..-2] if @port [0] == ?"
@user = @user [1..-2] if @user [0] == ?"
@passwd = @passwd [1..-2] if @passwd [0] == ?"
@db_name = @db_name[1..-2] if @db_name[0] == ?"
else
puts "Could not load DB configuration from #{ETC_LOCATION}/oned.conf"
exit -1
end
end
# Resolves the backup file path: an explicit --backup option wins;
# otherwise a backend-specific default under $VAR_LOCATION is used.
# Returns "" when the backend is unknown.
def get_bck_file()
    explicit = @ops[:backup]
    return explicit if explicit != nil

    case @backend
    when :sqlite
        "#{VAR_LOCATION}/one.db.bck"
    when :mysql
        "#{VAR_LOCATION}/mysql_#{@server}_#{@db_name}.sql"
    else
        ""
    end
end
# Dumps the DB to the file returned by get_bck_file(). For SQLite the DB
# file is simply copied; for MySQL, mysqldump is invoked via the shell.
# Refuses to overwrite an existing backup file unless -f/--force was given.
# Exits with -1 on any error.
def backup_db()
bck_file = get_bck_file()
if( !@ops[:force] && File.exists?(bck_file) )
puts "File #{bck_file} exists, backup aborted. Use -f to overwrite."
exit -1
end
case @backend
when :sqlite
if( ! File.exists?(@sqlite_file) )
puts "File #{@sqlite_file} doesn't exist, backup aborted."
exit -1
end
FileUtils.cp(@sqlite_file, "#{bck_file}")
puts "Sqlite database backup stored in #{bck_file}"
puts "Use 'onedb restore' or copy the file back to restore the DB."
when :mysql
# NOTE(review): the password appears on the mysqldump command line
# (-p#{@passwd}), which is visible to other local users via ps
cmd = "mysqldump -u #{@user} -p#{@passwd} -h #{@server} " +
"-P #{@port} #{@db_name} > #{bck_file}"
rc = system(cmd)
if( !rc )
puts "Unknown error running '#{cmd}'"
exit -1
end
puts "MySQL dump stored in #{bck_file}"
puts "Use 'onedb restore' or restore the DB using the mysql command:"
puts "mysql -u user -h server -P port db_name < backup_file"
else
puts "Unknown DB #{@backend}"
exit -1
end
puts ""
end
# Opens the Sequel connection described by the ivars set in
# connection_params() and stores it in @db. Exits with -1 when the SQLite
# file is missing or the backend is unknown.
def connect_db()
case @backend
when :sqlite
if( ! File.exists?(@sqlite_file) )
puts "File #{@sqlite_file} doesn't exist."
exit -1
end
@db = Sequel.sqlite(@sqlite_file)
when :mysql
@db = Sequel.connect(
"mysql://#{@user}:#{@passwd}@#{@server}:#{@port}/#{@db_name}")
else
puts "Unknown DB #{@backend}"
exit -1
end
end
# Reads the newest row of the db_versioning table.
# @return [Array(Integer, Object, String)] [version, timestamp, comment];
#   [0, 0, <comment>] when the table does not exist but the DB looks like a
#   valid 2.0/2.2 OpenNebula DB. Exits with -1 when the DB does not look
#   like an OpenNebula 2.X database at all.
def read_db_version()
version = 0
timestamp = 0
comment = ""
@db.fetch("SELECT version, timestamp, comment FROM db_versioning " +
"WHERE oid=(SELECT MAX(oid) FROM db_versioning)") do |row|
version = row[:version]
timestamp = row[:timestamp]
comment = row[:comment]
end
return [version.to_i, timestamp, comment]
# Method-level rescue: any failure of the SELECT above (most likely a
# missing db_versioning table) falls through to the detection logic below
rescue
# If the DB doesn't have db_versioning table, it means it is empty or a 2.x
# OpenNebula DB
begin
# User with ID 0 (oneadmin) always exists
@db.fetch("SELECT * FROM user_pool WHERE oid=0") do |row|
end
rescue
puts "Database schema does not look to be created by OpenNebula:"
puts "table user_pool is missing or empty."
exit -1
end
begin
# Table image_pool is present only in 2.X DBs
@db.fetch("SELECT * FROM image_pool") do |row|
end
rescue
puts "Database schema looks to be created by OpenNebula 1.X."
puts "This tool only works with databases created by 2.X versions."
exit -1
end
comment = "Could not read any previous db_versioning data, assuming it is "+
"an OpenNebula 2.0 or 2.2 DB."
return [0, 0, comment]
end
def one_not_running()
if File.exists?(LOCK_FILE)
puts "First stop OpenNebula. Lock file found: #{LOCK_FILE}"
exit -1
end
end
################################################################################
################################################################################
# ---------------------------------------------------------------------------
# Main program: parse options, then dispatch on the first positional command
# (upgrade | version | history | backup | restore).
# ---------------------------------------------------------------------------
onedb_opts = OneDBParse.new([])
onedb_opts.parse(ARGV)

@ops     = onedb_opts.options
@verbose = @ops[:verbose]

command = ARGV.shift

case command
when "upgrade"
    # Check opennebula is not running
    one_not_running()

    # Get DB connection parameters, from oned.conf or command arguments
    connection_params()

    # Connect to DB
    connect_db()

    # Read DB's version
    version, timestamp, comment = read_db_version()

    if( @verbose )
        puts "Version read:"
        puts "#{version} : #{comment}"
        puts ""
    end

    # Upgrade, using the scripts in $LIB_LOCATION/onedb/xx.rb
    max_version = nil
    if( ARGV[0] )
        max_version = ARGV[0].to_i
    end

    migrator_version = version + 1
    migrator         = nil
    file             = "#{LIB_LOCATION}/onedb/#{migrator_version}.rb"

    if( File.exists?(file) &&
        (max_version == nil || migrator_version <= max_version) )
        # At least one upgrade will be executed, make DB backup
        backup_db()
    end

    # Run migrators one version at a time until no script is left (or the
    # requested max version is reached)
    while( File.exists?(file) &&
           (max_version == nil || migrator_version <= max_version) )

        puts " > Running migrator #{file}" if @verbose

        # Each migrator file defines class Migrator < MigratorBase
        load(file)
        migrator = Migrator.new(@db, @verbose)

        result = migrator.up

        if( !result )
            puts "Error while upgrading from #{migrator_version-1} to #{migrator.db_version}"
            # Bug fix: 'return' is invalid at the top level of a script
            # (LocalJumpError); abort with a non-zero exit status instead
            exit -1
        end

        puts " > Done" if @verbose
        puts "" if @verbose

        migrator_version += 1
        file = "#{LIB_LOCATION}/onedb/#{migrator_version}.rb"
    end

    # Modify db_versioning table
    if( migrator != nil )
        comment = "Database migrated from #{version} to #{migrator.db_version}"+
            " (#{migrator.one_version}) by onedb command."

        max_oid = nil
        @db.fetch("SELECT MAX(oid) FROM db_versioning") do |row|
            max_oid = row[:"MAX(oid)"].to_i
        end

        max_oid = 0 if max_oid == nil

        @db.run "INSERT INTO db_versioning (oid, version, timestamp, comment) "+
            "VALUES (" +
            "#{max_oid+1}, " +
            "'#{migrator.db_version}', " +
            "#{Time.new.to_i}, " +
            "'#{comment}')"

        puts comment
    else
        puts "Database already uses version #{version}"
    end

when "version"
    connection_params()
    connect_db()

    version, timestamp, comment = read_db_version()

    if(@verbose)
        puts "Version: #{version}"
        # Version 0 has no stored timestamp; show the current time instead
        time = version == 0 ? Time.now : Time.at(timestamp)
        # TODO: UTC or Local time?
        puts "Timestamp: #{time.getgm.strftime("%b %d, %Y %H:%M")}"
        puts "Comment: #{comment}"
    else
        puts version
    end

when "history"
    connection_params()
    connect_db()

    begin
        @db.fetch("SELECT version, timestamp, comment FROM db_versioning") do |row|
            puts "Version: #{row[:version]}"
            # Bug fix: the original tested the undefined local 'version'
            # (always nil here) instead of the row's version column
            time = row[:version].to_i == 0 ? Time.now : Time.at(row[:timestamp])
            # TODO: UTC or Local time?
            puts "Timestamp: #{time.getgm.strftime("%b %d, %Y %H:%M")}"
            puts "Comment: #{row[:comment]}"
            puts ""
        end
    rescue Exception => e
        puts "No version records found. Error message:"
        puts e.message
    end

when "backup"
    # Optional positional argument overrides --backup
    if( ARGV[0] != nil )
        @ops[:backup] = ARGV[0]
    end

    connection_params()
    backup_db()

when "restore"
    # Optional positional argument overrides --backup
    if( ARGV[0] != nil )
        @ops[:backup] = ARGV[0]
    end

    connection_params()

    # Source sql dump file
    bck_file = get_bck_file()

    if( ! File.exists?(bck_file) )
        puts "File #{bck_file} doesn't exist, backup restoration aborted."
        exit -1
    end

    one_not_running()

    case @backend
    when :sqlite
        if( !@ops[:force] && File.exists?(@sqlite_file) )
            puts "File #{@sqlite_file} exists, use -f to overwrite."
            exit -1
        end

        FileUtils.cp(bck_file, @sqlite_file)
        puts "Sqlite database backup restored in #{@sqlite_file}"

    when :mysql
        connect_db()

        # Check if target database exists
        exists = false
        begin
            # User with ID 0 (oneadmin) always exists
            @db.fetch("SELECT * FROM user_pool WHERE oid=0") do |row|
            end
            exists = true
        rescue
        end

        if( !@ops[:force] && exists )
            puts "MySQL database #{@db_name} at #{@server} exists, use -f to overwrite."
            exit -1
        end

        mysql_cmd = "mysql -u #{@user} -p#{@passwd} -h #{@server} " +
            "-P #{@port} "

        # Drop and recreate the target DB, then feed the dump to mysql
        rc = system( mysql_cmd + "-e 'DROP DATABASE IF EXISTS #{@db_name};'")
        if( !rc )
            puts "Error dropping MySQL DB #{@db_name} at #{@server}."
            exit -1
        end

        rc = system( mysql_cmd + "-e 'CREATE DATABASE IF NOT EXISTS #{@db_name};'")
        if( !rc )
            puts "Error creating MySQL DB #{@db_name} at #{@server}."
            exit -1
        end

        rc = system( mysql_cmd + "#{@db_name} < #{bck_file}")
        if( !rc )
            puts "Error while restoring MySQL DB #{@db_name} at #{@server}."
            exit -1
        end

        puts "MySQL DB #{@db_name} at #{@server} restored."
    else
        puts "Unknown DB #{@backend}"
        exit -1
    end

else
    # Unknown or missing command: show usage and fail
    onedb_opts.print_help
    exit -1
end

exit 0

View File

@ -814,12 +814,6 @@ when "saveas"
puts result.message
exit -1
end
if vm["TEMPLATE/DISK[DISK_ID=\"#{disk_id}\"]/SAVE_AS"]
puts "Error: The disk #{disk_id} is already" <<
" suppossed to be saved"
exit -1
end
result = vm.save_as(disk_id.to_i, image_name)
if is_successful?(result)

View File

@ -0,0 +1,37 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#------------------------------------------------------------------------------
# Configuration File for File-System based Image Repositories
#------------------------------------------------------------------------------
export IMAGE_REPOSITORY_PATH=$ONE_LOCATION/var/images
#------------------------------------------------------------------------------
# Function used to generate Image names, you should not need to override this
#------------------------------------------------------------------------------
# Builds a unique repository path for an image: the MD5 hash of
# "<epoch seconds>:<image id>" rooted at $IMAGE_REPOSITORY_PATH.
# Relies on $DATE, $MD5SUM and $ID being set by the calling driver script.
function generate_image_path {
CANONICAL_STR="`$DATE +%s`:$ID"
CANONICAL_MD5=$($MD5SUM - << EOF
$CANONICAL_STR
EOF
)
# md5sum prints "<hash>  -"; keep only the first field (the hash)
echo "$IMAGE_REPOSITORY_PATH/`echo $CANONICAL_MD5 | cut -d ' ' -f1`"
}

View File

@ -0,0 +1,70 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Common helpers for OpenNebula driver scripts: logging to STDERR and
# shell-command execution with error reporting back to the MAD protocol.
module OpenNebula
    # Generic log function
    # @param severity [String] label printed before the message
    # @param message  [String] text to log
    def self.log_function(severity, message)
        STDERR.puts "#{severity}: #{File.basename $0}: #{message}"
    end

    # Logs an info message
    def self.log_info(message)
        log_function("INFO", message)
    end

    # Logs an error message
    def self.log_error(message)
        log_function("ERROR", message)
    end

    # Logs a debug message
    def self.log_debug(message)
        log_function("DEBUG", message)
    end

    # Alias log to log_info in the singleton class
    class << self
        alias :log :log_info
    end

    # This function is used to pass error message to the mad
    def self.error_message(message)
        STDERR.puts "ERROR MESSAGE --8<------"
        STDERR.puts message
        STDERR.puts "ERROR MESSAGE ------>8--"
    end

    # Executes a command, if it fails returns error message and exits
    # If a second parameter is present it is used as the error message when
    # the command fails
    def self.exec_and_log(command, message=nil)
        output=`#{command} 2>&1 1>/dev/null`
        # Bug fix: $? is a Process::Status, not an Integer; Kernel#exit
        # needs the numeric status, so extract it with #exitstatus
        code=$?.exitstatus

        if code!=0
            log_error "Command \"#{command}\" failed."
            log_error output

            if !message
                error_message output
            else
                error_message message
            end

            exit code
        end

        log "Executed \"#{command}\"."
    end
end

View File

@ -227,16 +227,28 @@ void Nebula::start()
}
}
NebulaLog::log("ONE",Log::INFO,"Bootstraping OpenNebula database.");
NebulaLog::log("ONE",Log::INFO,"Checking database version.");
rc = check_db_version();
VirtualMachinePool::bootstrap(db);
HostPool::bootstrap(db);
VirtualNetworkPool::bootstrap(db);
UserPool::bootstrap(db);
ImagePool::bootstrap(db);
ClusterPool::bootstrap(db);
GroupPool::bootstrap(db);
VMTemplatePool::bootstrap(db);
if( rc == -1 )
{
throw runtime_error("Database version mismatch.");
}
if( rc == -2 )
{
NebulaLog::log("ONE",Log::INFO,"Bootstraping OpenNebula database.");
bootstrap();
VirtualMachinePool::bootstrap(db);
HostPool::bootstrap(db);
VirtualNetworkPool::bootstrap(db);
UserPool::bootstrap(db);
ImagePool::bootstrap(db);
ClusterPool::bootstrap(db);
GroupPool::bootstrap(db);
VMTemplatePool::bootstrap(db);
}
}
catch (exception&)
{
@ -598,3 +610,99 @@ void Nebula::start()
NebulaLog::log("ONE", Log::INFO, "All modules finalized, exiting.\n");
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 * Creates the daemon-level control tables (pool_control and db_versioning)
 * and inserts the initial db_versioning record with the current schema
 * version and bootstrap timestamp. Called from start() only when
 * check_db_version() reported an empty database (-2).
 */
void Nebula::bootstrap()
{
ostringstream oss;
// Per-pool last OID bookkeeping, used by PoolSQL
oss << "CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, "
"last_oid BIGINT UNSIGNED)";
db->exec(oss);
oss.str("");
// Schema version history, read back by check_db_version()
oss << "CREATE TABLE db_versioning (oid INTEGER PRIMARY KEY, "
"version INTEGER, timestamp INTEGER, comment VARCHAR(256))";
db->exec(oss);
oss.str("");
// Record the bootstrap event as version row 0
oss << "INSERT INTO db_versioning (oid, version, timestamp, comment) "
<< "VALUES (0, " << db_version() << ", " << time(0)
<< ", '" << version() << " daemon bootstrap')";
db->exec(oss);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 * Compares the schema version stored in db_versioning with the one this
 * daemon uses (db_version()).
 * @return 0 if versions match, -1 on mismatch (including a 2.x DB that has
 *         no db_versioning table, which must be upgraded with onedb),
 *         -2 when the DB is empty and needs bootstrap
 */
int Nebula::check_db_version()
{
int rc;
ostringstream oss;
int loaded_db_version = 0;
// Try to read latest version
set_callback( static_cast<Callbackable::Callback>(&Nebula::select_cb),
static_cast<void *>(&loaded_db_version) );
oss << "SELECT version FROM db_versioning "
<< "WHERE oid=(SELECT MAX(oid) FROM db_versioning)";
db->exec(oss, this);
oss.str("");
unset_callback();
// loaded_db_version stays 0 when db_versioning is missing or empty:
// distinguish an empty DB from a pre-db_versioning (2.x) one
if( loaded_db_version == 0 )
{
// Table user_pool is present for all OpenNebula versions, and it
// always contains at least the oneadmin user.
oss << "SELECT MAX(oid) FROM user_pool";
rc = db->exec(oss);
oss.str("");
if( rc != 0 ) // Database needs bootstrap
{
return -2;
}
}
if( db_version() != loaded_db_version )
{
oss << "Database version mismatch. "
<< "Installed " << version() << " uses DB version '" << db_version()
<< "', and existing DB version is '"
<< loaded_db_version << "'.";
NebulaLog::log("ONE",Log::ERROR,oss);
return -1;
}
return 0;
}
/**
 * DB callback for check_db_version(): parses the single "version" column
 * returned by the SELECT into the int pointed to by _loaded_db_version
 * (left as 0 when no valid row is delivered).
 * @param _loaded_db_version pointer to an int receiving the DB version
 * @param num the number of columns read from the DB
 * @param values the column values
 * @param names the column names
 * @return 0 on success
 */
int Nebula::select_cb(void *_loaded_db_version, int num, char **values,
                      char **names)
{
    istringstream iss;

    int * loaded_db_version = static_cast<int *>(_loaded_db_version);

    *loaded_db_version = 0;

    // Bug fix: test the column count first; reading values[0] before
    // checking num == 1 dereferences the array even when no columns
    // were returned
    if ( (num == 1) && (values[0] != 0) )
    {
        iss.str(values[0]);
        iss >> *loaded_db_version;
    }

    return 0;
}

281
src/onedb/1.rb Normal file
View File

@ -0,0 +1,281 @@
# -------------------------------------------------------------------------- */
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
# not use this file except in compliance with the License. You may obtain */
# a copy of the License at */
# */
# http://www.apache.org/licenses/LICENSE-2.0 */
# */
# Unless required by applicable law or agreed to in writing, software */
# distributed under the License is distributed on an "AS IS" BASIS, */
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
# See the License for the specific language governing permissions and */
# limitations under the License. */
# -------------------------------------------------------------------------- */
class Migrator < MigratorBase
# Migrator from DB schema 0 (OpenNebula 2.0/2.2) to schema 1.
# @param db      [Sequel::Database] open database connection
# @param verbose [Boolean] whether to print progress messages
def initialize(db, verbose)
# MigratorBase stores @db and @verbose
super(db, verbose)
# Schema version this migrator produces
@db_version = 1
# OpenNebula release that introduced this schema
@one_version = "OpenNebula 2.3.0"
end
def up
########################################################################
# Users
########################################################################
# 2.2 Schema
# CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, user_name VARCHAR(256), password TEXT,enabled INTEGER, UNIQUE(user_name));
# Move table user_pool
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
# Create new user_pool
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, UNIQUE(name));"
# Read each entry in the old user_pool, and insert into new user_pool
@db.fetch("SELECT * FROM old_user_pool") do |row|
oid = row[:oid]
name = row[:user_name]
body = "<USER><ID>#{oid}</ID><NAME>#{name}</NAME><PASSWORD>#{row[:password]}</PASSWORD><ENABLED>#{row[:enabled]}</ENABLED></USER>"
@db.run "INSERT INTO user_pool VALUES(#{oid},'#{name}','#{body}');"
end
# Delete old user_pool
@db.run "DROP TABLE old_user_pool"
########################################################################
# Clusters
########################################################################
# 2.2 Schema
# CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, cluster_name VARCHAR(128), UNIQUE(cluster_name) );
# Move table
@db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;"
# Create new table
@db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, UNIQUE(name));"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_cluster_pool") do |row|
oid = row[:oid]
name = row[:cluster_name]
body = "<CLUSTER><ID>#{oid}</ID><NAME>#{name}</NAME></CLUSTER>"
@db.run "INSERT INTO cluster_pool VALUES(#{oid},'#{name}','#{body}');"
end
# Delete old table
@db.run "DROP TABLE old_cluster_pool"
########################################################################
# Hosts
########################################################################
# 2.2 Schema
# CREATE TABLE host_pool (oid INTEGER PRIMARY KEY,host_name VARCHAR(256), state INTEGER,im_mad VARCHAR(128),vm_mad VARCHAR(128),tm_mad VARCHAR(128),last_mon_time INTEGER, cluster VARCHAR(128), template TEXT, UNIQUE(host_name));
# CREATE TABLE host_shares(hid INTEGER PRIMARY KEY,disk_usage INTEGER, mem_usage INTEGER, cpu_usage INTEGER,max_disk INTEGER, max_mem INTEGER, max_cpu INTEGER,free_disk INTEGER, free_mem INTEGER, free_cpu INTEGER,used_disk INTEGER, used_mem INTEGER, used_cpu INTEGER,running_vms INTEGER);
# Move table
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
# Create new table
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, state INTEGER, last_mon_time INTEGER, cluster VARCHAR(128), UNIQUE(name));"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_host_pool") do |row|
oid = row[:oid]
name = row[:host_name]
state = row[:state]
last_mon_time = row[:last_mon_time]
cluster = row[:cluster]
# There is one host share for each host
host_share = ""
@db.fetch("SELECT * FROM host_shares WHERE hid=#{oid}") do |share|
host_share = "<HOST_SHARE><DISK_USAGE>#{share[:disk_usage]}</DISK_USAGE><MEM_USAGE>#{share[:mem_usage]}</MEM_USAGE><CPU_USAGE>#{share[:cpu_usage]}</CPU_USAGE><MAX_DISK>#{share[:max_disk]}</MAX_DISK><MAX_MEM>#{share[:max_mem]}</MAX_MEM><MAX_CPU>#{share[:max_cpu]}</MAX_CPU><FREE_DISK>#{share[:free_disk]}</FREE_DISK><FREE_MEM>#{share[:free_mem]}</FREE_MEM><FREE_CPU>#{share[:free_cpu]}</FREE_CPU><USED_DISK>#{share[:used_disk]}</USED_DISK><USED_MEM>#{share[:used_mem]}</USED_MEM><USED_CPU>#{share[:used_cpu]}</USED_CPU><RUNNING_VMS>#{share[:running_vms]}</RUNNING_VMS></HOST_SHARE>"
end
body = "<HOST><ID>#{oid}</ID><NAME>#{name}</NAME><STATE>#{state}</STATE><IM_MAD>#{row[:im_mad]}</IM_MAD><VM_MAD>#{row[:vm_mad]}</VM_MAD><TM_MAD>#{row[:tm_mad]}</TM_MAD><LAST_MON_TIME>#{last_mon_time}</LAST_MON_TIME><CLUSTER>#{cluster}</CLUSTER>#{host_share}#{row[:template]}</HOST>"
@db.run "INSERT INTO host_pool VALUES(#{oid},'#{name}','#{body}', #{state}, #{last_mon_time}, '#{cluster}');"
end
# Delete old table
@db.run "DROP TABLE old_host_pool"
@db.run "DROP TABLE host_shares"
########################################################################
# Images
########################################################################
# 2.2 Schema
# CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, uid INTEGER, name VARCHAR(128), type INTEGER, public INTEGER, persistent INTEGER, regtime INTEGER, source TEXT, state INTEGER, running_vms INTEGER, template TEXT, UNIQUE(name) );
# Move table
@db.run "ALTER TABLE image_pool RENAME TO old_image_pool;"
# Create new table
@db.run "CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, uid INTEGER, public INTEGER, UNIQUE(name,uid) );"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_image_pool") do |row|
oid = row[:oid]
name = row[:name]
uid = row[:uid]
public = row[:public]
username = get_username(uid)
# In OpenNebula 2.0 Image States go from 0 to 3, in 3.0 go
# from 0 to 5, but the meaning is the same for states 0 to 3
body = "<IMAGE><ID>#{oid}</ID><UID>#{row[:uid]}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><TYPE>#{row[:type]}</TYPE><PUBLIC>#{public}</PUBLIC><PERSISTENT>#{row[:persistent]}</PERSISTENT><REGTIME>#{row[:regtime]}</REGTIME><SOURCE>#{row[:source]}</SOURCE><STATE>#{row[:state]}</STATE><RUNNING_VMS>#{row[:running_vms]}</RUNNING_VMS>#{row[:template]}</IMAGE>"
@db.run "INSERT INTO image_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{public});"
end
# Delete old table
@db.run "DROP TABLE old_image_pool"
########################################################################
# VMs
########################################################################
# 2.2 Schema
# CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY,uid INTEGER,name TEXT,last_poll INTEGER, state INTEGER,lcm_state INTEGER,stime INTEGER,etime INTEGER,deploy_id TEXT,memory INTEGER,cpu INTEGER,net_tx INTEGER,net_rx INTEGER, last_seq INTEGER, template TEXT);
# CREATE TABLE history (vid INTEGER,seq INTEGER,host_name TEXT,vm_dir TEXT,hid INTEGER,vm_mad TEXT,tm_mad TEXT,stime INTEGER,etime INTEGER,pstime INTEGER,petime INTEGER,rstime INTEGER,retime INTEGER,estime INTEGER,eetime INTEGER,reason INTEGER,PRIMARY KEY(vid,seq));
# Move tables
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
@db.run "ALTER TABLE history RENAME TO old_history;"
# Create new tables
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name TEXT, body TEXT, uid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER);"
@db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body TEXT, PRIMARY KEY(vid,seq));"
# Read each entry in the old history table, and insert into new table
@db.fetch("SELECT * FROM old_history") do |row|
vid = row[:vid]
seq = row[:seq]
body = "<HISTORY><SEQ>#{seq}</SEQ><HOSTNAME>#{row[:host_name]}</HOSTNAME><VM_DIR>#{row[:vm_dir]}</VM_DIR><HID>#{row[:hid]}</HID><STIME>#{row[:stime]}</STIME><ETIME>#{row[:etime]}</ETIME><VMMMAD>#{row[:vm_mad]}</VMMMAD><TMMAD>#{row[:tm_mad]}</TMMAD><PSTIME>#{row[:pstime]}</PSTIME><PETIME>#{row[:petime]}</PETIME><RSTIME>#{row[:rstime]}</RSTIME><RETIME>#{row[:retime]}</RETIME><ESTIME>#{row[:estime]}</ESTIME><EETIME>#{row[:eetime]}</EETIME><REASON>#{row[:reason]}</REASON></HISTORY>"
@db.run "INSERT INTO history VALUES(#{vid},'#{seq}','#{body}');"
end
# Read each entry in the old vm table, and insert into new table
@db.fetch("SELECT * FROM old_vm_pool") do |row|
oid = row[:oid]
name = row[:name]
uid = row[:uid]
last_poll = row[:last_poll]
state = row[:state]
lcm_state = row[:lcm_state]
username = get_username(uid)
# If the VM has History items, the last one is included in the XML
history = ""
@db.fetch("SELECT body FROM history WHERE vid=#{oid} AND seq=(SELECT MAX(seq) FROM history WHERE vid=#{oid})") do |history_row|
history = history_row[:body]
end
body = "<VM><ID>#{oid}</ID><UID>#{uid}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><LAST_POLL>#{last_poll}</LAST_POLL><STATE>#{state}</STATE><LCM_STATE>#{lcm_state}</LCM_STATE><STIME>#{row[:stime]}</STIME><ETIME>#{row[:etime]}</ETIME><DEPLOY_ID>#{row[:deploy_id]}</DEPLOY_ID><MEMORY>#{row[:memory]}</MEMORY><CPU>#{row[:cpu]}</CPU><NET_TX>#{row[:net_tx]}</NET_TX><NET_RX>#{row[:net_rx]}</NET_RX>#{row[:template]}#{history}</VM>"
@db.run "INSERT INTO vm_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{last_poll}, #{state}, #{lcm_state});"
end
# Delete old tables
@db.run "DROP TABLE old_vm_pool"
@db.run "DROP TABLE old_history"
########################################################################
# Virtual Networks
########################################################################
# 2.2 Schema
# CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, uid INTEGER, name VARCHAR(256), type INTEGER, bridge TEXT, public INTEGER, template TEXT, UNIQUE(name));
# CREATE TABLE leases (oid INTEGER, ip BIGINT, mac_prefix BIGINT, mac_suffix BIGINT,vid INTEGER, used INTEGER, PRIMARY KEY(oid,ip));
# Move tables
@db.run "ALTER TABLE network_pool RENAME TO old_network_pool;"
@db.run "ALTER TABLE leases RENAME TO old_leases;"
# Create new tables
@db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, public INTEGER, UNIQUE(name,uid));"
@db.run "CREATE TABLE leases (oid INTEGER, ip BIGINT, body TEXT, PRIMARY KEY(oid,ip));"
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_network_pool") do |row|
oid = row[:oid]
name = row[:name]
uid = row[:uid]
public = row[:public]
username = get_username(uid)
# <TOTAL_LEASES> is stored in the DB, but it is not used to rebuild
# the VirtualNetwork object, and it is generated each time the
# network is listed. So setting it to 0 is safe
body = "<VNET><ID>#{oid}</ID><UID>#{uid}</UID><USERNAME>#{username}</USERNAME><NAME>#{name}</NAME><TYPE>#{row[:type]}</TYPE><BRIDGE>#{row[:bridge]}</BRIDGE><PUBLIC>#{public}</PUBLIC><TOTAL_LEASES>0</TOTAL_LEASES>#{row[:template]}</VNET>"
@db.run "INSERT INTO network_pool VALUES(#{oid},'#{name}','#{body}', #{uid}, #{public});"
end
# Read each entry in the old table, and insert into new table
@db.fetch("SELECT * FROM old_leases") do |row|
oid = row[:oid]
ip = row[:ip]
body = "<LEASE><IP>#{ip}</IP><MAC_PREFIX>#{row[:mac_prefix]}</MAC_PREFIX><MAC_SUFFIX>#{row[:mac_suffix]}</MAC_SUFFIX><USED>#{row[:used]}</USED><VID>#{row[:vid]}</VID></LEASE>"
@db.run "INSERT INTO leases VALUES(#{oid}, #{ip}, '#{body}');"
end
# Delete old tables
@db.run "DROP TABLE old_network_pool"
@db.run "DROP TABLE old_leases"
########################################################################
# New tables in DB version 1
########################################################################
@db.run "CREATE TABLE db_versioning (oid INTEGER PRIMARY KEY, version INTEGER, timestamp INTEGER, comment VARCHAR(256));"
@db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(256), body TEXT, uid INTEGER, public INTEGER);"
# New pool_control table contains the last_oid used, must be rebuilt
@db.run "CREATE TABLE pool_control (tablename VARCHAR(32) PRIMARY KEY, last_oid BIGINT UNSIGNED)"
for table in ["user_pool", "cluster_pool", "host_pool", "image_pool", "vm_pool", "network_pool"] do
@db.fetch("SELECT MAX(oid) FROM #{table}") do |row|
if( row[:"MAX(oid)"] != nil )
@db.run "INSERT INTO pool_control (tablename, last_oid) VALUES ('#{table}', #{row[:"MAX(oid)"]});"
end
end
end
return true
end
# Resolve a numeric user oid to its user name by querying the
# (already migrated) user_pool table.
#
# @param uid [Integer] oid of the user in user_pool
# @return [String] the user name, or "" when no row matches
def get_username(uid)
    name = ""
    @db.fetch("SELECT name FROM user_pool WHERE oid=#{uid}") { |row| name = row[:name] }
    name
end
end

View File

@ -50,7 +50,8 @@ int PoolSQL::init_cb(void *nil, int num, char **values, char **names)
/* -------------------------------------------------------------------------- */
PoolSQL::PoolSQL(SqlDB * _db, const char * table): db(_db), lastOID(-1)
PoolSQL::PoolSQL(SqlDB * _db, const char * _table):
db(_db), lastOID(-1), table(_table)
{
ostringstream oss;
@ -58,7 +59,7 @@ PoolSQL::PoolSQL(SqlDB * _db, const char * table): db(_db), lastOID(-1)
set_callback(static_cast<Callbackable::Callback>(&PoolSQL::init_cb));
oss << "SELECT MAX(oid) FROM " << table;
oss << "SELECT last_oid FROM pool_control WHERE tablename='" << table <<"'";
db->exec(oss,this);
@ -129,11 +130,29 @@ int PoolSQL::allocate(
delete objsql;
if( rc != -1 )
{
update_lastOID();
}
unlock();
return rc;
}
void PoolSQL::update_lastOID()
{
// db->escape_str is not used for 'table' since its name can't be set in
// any way by the user, it is hardcoded.
ostringstream oss;
oss << "REPLACE INTO pool_control (tablename, last_oid) VALUES ("
<< "'" << table << "',"
<< lastOID << ")";
db->exec(oss);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -32,7 +32,7 @@ module OpenNebulaJSON
template = template_to_str(image_hash)
end
OpenNebula::ImageRepository.new.create(self, template)
self.allocate(template)
end
def perform_action(template_json)

View File

@ -86,43 +86,7 @@ module OpenNebulaJSON
end
def save_as(params=Hash.new)
if params['image_type']
image_type = params['image_type']
else
image_id = self["TEMPLATE/DISK[DISK_ID=\"#{params[:disk_id]}\"]/IMAGE_ID"]
if (image_id != nil)
if self["TEMPLATE/DISK[DISK_ID=\"#{disk_id}\"]/SAVE_AS"]
error_msg = "Error: The disk #{disk_id} is already" <<
" supposed to be saved"
return OpenNebula::Error.new(error_msg)
end
# Get the image type
image = OpenNebula::Image.new(
OpenNebula::Image.build_xml(image_id), @client)
result = image.info
if OpenNebula.is_error?(result)
return result
end
image_type = image.type_str
end
end
# Build the template and allocate the new Image
template = "NAME=\"#{params['image_name']}\"\n"
template << "TYPE=#{image_type}\n" if image_type
image = OpenNebula::Image.new(OpenNebula::Image.build_xml, @client)
result = image.allocate(template)
if OpenNebula.is_error?(result)
return result
end
super(params['disk_id'].to_i, image.id)
super(params['disk_id'].to_i, params['image_name'])
end
end
end

View File

@ -384,10 +384,6 @@ tr.even:hover{
font-weight:bold;
}
.info_table td.key_td:after{
content:":";
}
.info_table td.value_td{
text-align:left;
}

View File

@ -55,6 +55,13 @@ var OpenNebula = {
"ERROR",
"DISABLED"][value];
break;
case "HOST_SIMPLE","host_simple":
return ["ON",
"ON",
"ON",
"ERROR",
"OFF"][value];
break;
case "VM","vm":
return ["INIT",
"PENDING",

View File

@ -376,14 +376,14 @@ function hostElementArray(host_json){
var pb_mem =
'<div style="height:10px" class="ratiobar ui-progressbar ui-widget ui-widget-content ui-corner-all" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="'+ratio_mem+'">\
<div class="ui-progressbar-value ui-widget-header ui-corner-left ui-corner-right" style="width: '+ratio_mem+'%;"/>\
<span style="position:relative;left:45px;top:-4px;font-size:0.6em">'+ratio_mem+'%</span>\
<span style="position:relative;left:90px;top:-4px;font-size:0.6em">'+ratio_mem+'%</span>\
</div>\
</div>';
var pb_cpu =
'<div style="height:10px" class="ratiobar ui-progressbar ui-widget ui-widget-content ui-corner-all" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="'+ratio_cpu+'">\
<div class="ui-progressbar-value ui-widget-header ui-corner-left ui-corner-right" style="width: '+ratio_cpu+'%;"/>\
<span style="position:relative;left:45px;top:-4px;font-size:0.6em">'+ratio_cpu+'%</span>\
<span style="position:relative;left:90px;top:-4px;font-size:0.6em">'+ratio_cpu+'%</span>\
</div>\
</div>';
@ -395,7 +395,7 @@ function hostElementArray(host_json){
host.HOST_SHARE.RUNNING_VMS, //rvm
pb_cpu,
pb_mem,
OpenNebula.Helper.resource_state("host",host.STATE) ];
OpenNebula.Helper.resource_state("host_simple",host.STATE) ];
}
@ -416,7 +416,7 @@ function hostInfoListener(){
//updates the host select by refreshing the options in it
function updateHostSelect(){
hosts_select = makeSelectOptions(dataTable_hosts,1,2,7,"DISABLED");
hosts_select = makeSelectOptions(dataTable_hosts,1,2,7,"DISABLED",-1);
}
@ -661,7 +661,7 @@ $(document).ready(function(){
{ "bSortable": false, "aTargets": ["check"] },
{ "sWidth": "60px", "aTargets": [0,4] },
{ "sWidth": "35px", "aTargets": [1] },
{ "sWidth": "120px", "aTargets": [5,6] }
{ "sWidth": "200px", "aTargets": [5,6] }
]
});

View File

@ -474,7 +474,8 @@ function imageInfoListener(){
//Updates the select input field with an option for each image
function updateImageSelect(){
images_select = makeSelectOptions(dataTable_images,1,3,8,"DISABLED");
images_select =
makeSelectOptions(dataTable_images,1,3,8,"DISABLED",2);
//update static selectors:
//in the VM section
@ -499,8 +500,7 @@ function deleteImageElement(req){
function addImageElement(request, image_json){
var element = imageElementArray(image_json);
addElement(element,dataTable_images);
//NOTE that the select is not updated because newly added images
//are disabled by default
updateImageSelect();
}
// Callback to refresh the list of images

View File

@ -448,6 +448,32 @@ var create_template_tmpl = '<div id="template_create_tabs">\
</div>\
\
\
<!--custom variables -->\
<div class="vm_section" id="custom_var">\
<div class="show_hide" id="add_context_cb">\
<h3>Add custom variables <a id="add_custom_var" class="icon_left" href="#"><span class="ui-icon ui-icon-plus" /></a></h3>\
</div>\
<fieldset><legend>Custom variables</legend>\
<div class="vm_param kvm_opt xen_opt vmware_opt">\
<label for="custom_var_name">Name:</label>\
<input type="text" id="custom_var_name" name="custom_var_name" />\
<div class="tip">Name for the custom variable</div>\
</div>\
<div class="vm_param kvm_opt xen_opt">\
<label for="custom_var_value">Value:</label>\
<input type="text" id="custom_var_value" name="custom_var_value" />\
<div class="tip">Value of the custom variable</div>\
</div>\
<div class="">\
<button class="add_remove_button add_button" id="add_custom_var_button" value="add_custom_var">Add</button>\
<button class="add_remove_button" id="remove_custom_var_button" value="remove_custom_var">Remove selected</button>\
<div class="clear"></div>\
<label for="custom_var_box">Current variables:</label>\
<select id="custom_var_box" name="custom_var_box" style="width:150px;height:100px;" multiple>\
</select>\
</div>\
</fieldset>\
</div>\
<!-- submit -->\
<fieldset>\
<div class="form_buttons">\
@ -724,7 +750,8 @@ function templateInfoListener(){
//Updates the select input field with an option for each template
function updateTemplateSelect(){
templates_select = makeSelectOptions(dataTable_templates,1,3,5,"No");
templates_select =
makeSelectOptions(dataTable_templates,1,3,5,"no",2);
//update static selectors:
$('#create_vm_dialog #template_id').html(templates_select);
@ -748,8 +775,7 @@ function deleteTemplateElement(req){
function addTemplateElement(request, template_json){
var element = templateElementArray(template_json);
addElement(element,dataTable_templates);
//NOTE that the select is not updated because newly added templates
//are not public
updateTemplateSelect();
}
// Callback to refresh the list of templates
@ -1349,6 +1375,11 @@ function setupCreateTemplateDialog(){
//format hidden
$('#FORMAT',section_disks).parent().hide();
$('#FORMAT',section_disks).parent().attr("disabled","disabled");
//source hidden
$('#SOURCE',section_disks).parent().hide();
$('#SOURCE',section_disks).parent().
attr("disabled","disabled");
break;
case "fs":
//size mandatory
@ -1367,6 +1398,10 @@ function setupCreateTemplateDialog(){
$('#FORMAT',section_disks).parent().removeClass(opt_class);
$('#FORMAT',section_disks).parent().addClass(man_class);
//source hidden
$('#SOURCE',section_disks).parent().hide();
$('#SOURCE',section_disks).parent().
attr("disabled","disabled");
break;
case "block":
//size shown and optional
@ -1382,6 +1417,11 @@ function setupCreateTemplateDialog(){
//format hidden
$('#FORMAT',section_disks).parent().hide();
$('#FORMAT',section_disks).parent().attr("disabled","disabled");
//source hidden
$('#SOURCE',section_disks).parent().hide();
$('#SOURCE',section_disks).parent().
attr("disabled","disabled");
break;
case "floppy":
case "disk":
@ -1398,6 +1438,11 @@ function setupCreateTemplateDialog(){
//format optional
$('#FORMAT',section_disks).parent().hide();
$('#FORMAT',section_disks).parent().attr("disabled","disabled");
//source shown
$('#SOURCE',section_disks).parent().show();
$('#SOURCE',section_disks).parent().
removeAttr("disabled");
}
//hide_disabled(section_disks);
});
@ -1588,6 +1633,38 @@ function setupCreateTemplateDialog(){
});
};
//set up the custom variables section
// Wire up the "custom variables" section of the Create Template dialog.
// Users add NAME=VALUE pairs to the #custom_var_box <select>, which is
// read back when the template JSON is assembled on form submission.
// NOTE(review): relies on section_custom_var, notifyError and
// box_remove_element being defined in the enclosing scope.
var custom_variables_setup = function(){
    // The input fieldset starts hidden; the "+" link toggles visibility.
    $('fieldset',section_custom_var).hide();
    $('#add_custom_var',section_custom_var).click(function(){
        $('fieldset',section_custom_var).toggle();
        return false;
    });
    // "Add" button: require both fields, then append a NAME=VALUE option.
    $('#add_custom_var_button', section_custom_var).click(
        function(){
            var name = $('#custom_var_name',section_custom_var).val();
            var value = $('#custom_var_value',section_custom_var).val();
            if (!name.length || !value.length) {
                notifyError("Custom variable name and value must be\
                        filled in");
                return false;
            }
            // Fix: declare with 'var' — the original assignment leaked
            // 'option' into the global scope.
            var option = '<option value=\''+value+'\' name=\''+name+'\'>'+
                         name+'='+value+
                         '</option>';
            $('select#custom_var_box',section_custom_var).append(option);
            return false;
        });
    // "Remove selected" button: drop the selected options from the box.
    $('#remove_custom_var_button', section_custom_var).click(
        function(){
            box_remove_element(section_custom_var,'#custom_var_box');
            return false;
        });
}
//***CREATE VM DIALOG MAIN BODY***
$('div#dialogs').append('<div title="Create VM Template" id="create_template_dialog"></div>');
@ -1625,6 +1702,7 @@ function setupCreateTemplateDialog(){
var section_context = $('#context');
var section_placement = $('#placement');
var section_raw = $('#raw');
var section_custom_var = $('#custom_var');
//Different selector for items of kvm and xen (mandatory and optional)
var items = '.vm_param input,.vm_param select';
@ -1667,12 +1745,14 @@ function setupCreateTemplateDialog(){
context_setup();
placement_setup();
raw_setup();
custom_variables_setup();
//Process form
$('button#create_template_form_easy').click(function(){
//validate form
var vm_json = {};
var name,value,boot_method;
//process capacity options
var scope = section_capacity;
@ -1722,7 +1802,6 @@ function setupCreateTemplateDialog(){
//context
scope = section_context;
var context = $('#CONTEXT',scope).val();
vm_json["CONTEXT"] = {};
$('#context_box option',scope).each(function(){
name = $(this).attr("name");
@ -1730,15 +1809,29 @@ function setupCreateTemplateDialog(){
vm_json["CONTEXT"][name]=value;
});
//placement -> fetch with value
scope = section_placement;
addSectionJSON(vm_json,scope);
//placement -> fetch with value, escape double quotes
scope = section_placement;
var requirements = $('input#REQUIREMENTS',scope).val();
requirements = escapeDoubleQuotes(requirements);
$('input#REQUIREMENTS',scope).val(requirements);
var rank = $('input#RANK',scope).val();
rank = escapeDoubleQuotes(rank);
$('input#RANK',scope).val(rank);
addSectionJSON(vm_json,scope);
//raw -> if value set type to driver and fetch
scope = section_raw;
vm_json["RAW"] = {};
addSectionJSON(vm_json["RAW"],scope);
//custom vars
scope = section_custom_var;
$('#custom_var_box option',scope).each(function(){
name = $(this).attr("name");
value = $(this).val();
vm_json[name]=value;
});
// remove empty elements
vm_json = removeEmptyObjects(vm_json);

View File

@ -308,7 +308,8 @@ function vNetworkInfoListener(){
//updates the vnet select different options
function updateNetworkSelect(){
vnetworks_select= makeSelectOptions(dataTable_vNetworks,1,3,6,"no")
vnetworks_select=
makeSelectOptions(dataTable_vNetworks,1,3,6,"no",2);
//update static selectors:
//in the VM creation dialog

View File

@ -40,7 +40,7 @@ function pretty_time(time_seconds)
var hour = pad(d.getHours(),2);
var mins = pad(d.getMinutes(),2);
var day = pad(d.getDate(),2);
var month = pad(d.getMonth(),2);
var month = pad(d.getMonth()+1,2); //getMonths returns 0-11
var year = d.getFullYear();
return hour + ":" + mins +":" + secs + "&nbsp;" + month + "/" + day + "/" + year;
@ -189,18 +189,22 @@ function notifyError(msg){
// Returns an HTML string with the json keys and values in the form
// key: value<br />
// It recursively explores objects, and flattens their contents in
// the result.
function prettyPrintJSON(template_json){
var str = ""
for (field in template_json) {
if (typeof template_json[field] == 'object'){
str += prettyPrintJSON(template_json[field]) + '<tr><td></td><td></td></tr>';
} else {
str += '<tr><td class="key_td">'+field+'</td><td class="value_td">'+template_json[field]+'</td></tr>';
};
};
return str;
// Render a (possibly nested) JSON object as HTML table rows.
// Each key becomes a <td class="key_td">, each scalar value a
// <td class="value_td">. Nested objects recurse with +25px of left
// padding, normal font weight and no bottom border, followed by a
// small spacer row. All styling parameters default for the top level.
function prettyPrintJSON(template_json, padding, weight, border_bottom) {
    padding = padding || 0;
    weight = weight || "bold";
    border_bottom = border_bottom || "1px solid #CCCCCC";

    // Style shared by every key cell emitted at this nesting level.
    var key_style = 'padding-left:' + padding + 'px;font-weight:' + weight +
                    ';border-bottom:' + border_bottom;
    var html = "";
    var field;
    for (field in template_json) {
        var value = template_json[field];
        if (typeof value == 'object') {
            // Header row for the nested object, then its contents, then
            // a spacer row indented 10px past the current level.
            html += '<tr><td class="key_td" style="' + key_style + '">' +
                    field + '</td><td class="value_td" style="border-bottom:' +
                    border_bottom + '"></td></tr>';
            html += prettyPrintJSON(value, padding + 25, "normal", "0") +
                    '<tr><td class="key_td" style="padding-left:' +
                    (padding + 10) + 'px"></td><td class="value_td"></td></tr>';
        } else {
            html += '<tr><td class="key_td" style="' + key_style + '">' +
                    field + '</td><td class="value_td" style="border-bottom:' +
                    border_bottom + '">' + value + '</td></tr>';
        }
    }
    return html;
}
//Add a listener to the check-all box of a datatable, enabling it to
@ -313,7 +317,7 @@ function waitingNodes(dataTable){
//not defined then it returns "uid UID".
//TODO not very nice to hardcode a dataTable here...
function getUserName(uid){
var user = "uid "+uid;
var user = uid;
if (typeof(dataTable_users) == "undefined") {
return user;
}
@ -382,7 +386,11 @@ function getSelectedNodes(dataTable){
//returns a HTML string with a select input code generated from
//a dataTable
function makeSelectOptions(dataTable,id_col,name_col,status_col,status_bad){
function makeSelectOptions(dataTable,
id_col,name_col,
status_col,
status_bad,
user_col){
var nodes = dataTable.fnGetData();
var select = "<option value=\"\">Please select</option>";
var array;
@ -390,13 +398,23 @@ function makeSelectOptions(dataTable,id_col,name_col,status_col,status_bad){
var id = this[id_col];
var name = this[name_col];
var status = this[status_col];
if (status != status_bad){
var user = user_col > 0 ? this[user_col] : false;
var isMine = user ? (username == user) || (uid == user) : true;
if ((status != status_bad) || isMine ){
select +='<option value="'+id+'">'+name+'</option>';
}
});
return select;
}
// Backslash-escape every double quote in a string. Any already-escaped
// quote (\") is first unescaped, so the operation is idempotent.
function escapeDoubleQuotes(str) {
    var unescaped = str.replace(/\\"/g, '"');
    return unescaped.replace(/"/g, '\\"');
}
//functions that used as true and false conditions for testing mainly
function True(){
return true;

View File

@ -14,8 +14,6 @@
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include <sqlite3.h>
#include "Template.h"
#include "template_syntax.h"

View File

@ -513,7 +513,7 @@ int LibVirtDriver::deployment_description_kvm(
if ( the_filter != 0 )
{
file <<"\t\t\t<filterref filter='"<< *the_filter <<"'/>"<<endl;
file <<"\t\t\t<filterref filter='"<< *the_filter <<"'>"<<endl;
file << "\t\t\t\t<parameter name='IP' value='"
<< ip << "'/>" << endl;
file << "\t\t\t</filterref>" << endl;

View File

@ -166,11 +166,6 @@ int LibVirtDriver::deployment_description_vmware(
get_default("DISK","DRIVER",default_driver);
if (default_driver.empty())
{
default_driver = "raw";
}
num = vm->get_template_attribute("DISK",attrs);
if (num!=0)
@ -252,7 +247,10 @@ int LibVirtDriver::deployment_description_vmware(
}
else
{
file << default_driver << "'/>" << endl;
if (!default_driver.empty())
{
file << default_driver << "'/>" << endl;
}
}
if (readonly)

View File

@ -36,4 +36,4 @@ FEATURES = [ PAE = "no", ACPI = "yes" ]
DISK = [ driver = "raw" , cache = "default"]
NIC = [ filter = "clean-traffic" ]
#NIC = [ filter = "clean-traffic" ]

View File

@ -13,8 +13,3 @@
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# To change the directory where the VMM scripts are copied on the remote node
# uncomment and change the path of VMM_REMOTE_DIR
#
#VMM_REMOTE_DIR=/tmp/ne_im_scripts