mirror of https://github.com/OpenNebula/one.git synced 2025-03-25 02:50:08 +03:00

Merge branch 'master' of git.opennebula.org:one

commit cbb3f33f6c
Ruben S. Montero, 2011-09-16 18:49:31 +02:00
8 changed files with 927 additions and 7 deletions


@@ -589,7 +589,7 @@ public:
Image * img;
VectorAttribute * disk;
- int oid_0, oid_1, index;
+ int oid_0, oid_1, index, img_id;
string value;
Image::ImageType img_type;

@@ -625,7 +625,7 @@ public:
disk = new VectorAttribute("DISK");
disk->replace("IMAGE_ID", "0");
- ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0);
+ ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0, img_id);
value = "";
value = disk->vector_value("TARGET");

@@ -639,7 +639,7 @@ public:
disk = new VectorAttribute("DISK");
disk->replace("IMAGE_ID", "1");
- ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0);
+ ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0, img_id);
value = "";
value = disk->vector_value("TARGET");


@@ -257,8 +257,8 @@ module Migrator
public = row[:public]
total_leases = 0
- @db.fetch("SELECT COUNT (ip) FROM old_leases WHERE (oid=#{oid} AND used=1)") do |r|
-     total_leases = r[:"COUNT (ip)"]
+ @db.fetch("SELECT COUNT(ip) FROM old_leases WHERE (oid=#{oid} AND used=1)") do |r|
+     total_leases = r[:"COUNT(ip)"]
end
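The whitespace change above matters once this migrator runs against MySQL: with the default sql_mode (IGNORE_SPACE disabled), built-in aggregates such as COUNT must be written with no space before the opening parenthesis, and the result column is keyed by the exact expression text, so the Ruby hash lookup has to match it. A minimal sketch with the mysql client, assuming the oneadmin credentials and onedb_test database used by the test files added in this commit (old_leases is the migrator's temporary table, so it is only present mid-upgrade):

mysql -u oneadmin -poneadmin onedb_test -e "SELECT COUNT (ip) FROM old_leases;"   # typically rejected unless IGNORE_SPACE is enabled
mysql -u oneadmin -poneadmin onedb_test -e "SELECT COUNT(ip) FROM old_leases;"    # accepted; the column is labelled COUNT(ip)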


@@ -177,7 +177,7 @@ class BackEndMySQL < OneDBBacKEnd
end
create_cmd = mysql_cmd+"-e 'CREATE DATABASE IF NOT EXISTS #{@db_name};'"
- rc = system(create_cnd)
+ rc = system(create_cmd)
if !rc
    raise "Error creating MySQL DB #{@db_name} at #{@server}."
end
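For context, create_cmd just shells out to the mysql client so that the backend database exists before onedb writes to it. Roughly what the corrected line ends up running, using the connection settings from the test configuration added in this commit (adjust credentials and db_name for a real installation):

mysql -u oneadmin -poneadmin -h localhost -P 0 -e 'CREATE DATABASE IF NOT EXISTS onedb_test;'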

File diff suppressed because one or more lines are too long


@@ -0,0 +1,433 @@
#*******************************************************************************
# OpenNebula Configuration file
#*******************************************************************************
#*******************************************************************************
# Daemon configuration attributes
#-------------------------------------------------------------------------------
# MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions.
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values
# than MANAGER_TIMER.
#
# HOST_MONITORING_INTERVAL: Time in seconds between host monitoring cycles.
#
# VM_POLLING_INTERVAL: Time in seconds between virtual machine monitoring cycles.
# (use 0 to disable VM monitoring).
#
# VM_DIR: Remote path to store the VM images, it should be shared between all
# the cluster nodes to perform live migrations. This variable is the default
# for all the hosts in the cluster. VM_DIR IS ONLY FOR THE NODES AND *NOT* THE
# FRONT-END
#
# SCRIPTS_REMOTE_DIR: Remote path to store the monitoring and VM management
# scripts.
#
# PORT: Port where oned will listen for xmlrpc calls.
#
# DB: Configuration attributes for the database backend
# backend : can be sqlite or mysql (default is sqlite)
# server : (mysql) host name or an IP address for the MySQL server
# port : (mysql) port for the connection to the server.
# If set to 0, the default port is used.
# user : (mysql) user's MySQL login ID
# passwd : (mysql) the password for user
# db_name : (mysql) the database name
#
# VNC_BASE_PORT: VNC ports for VMs can be automatically set to VNC_BASE_PORT +
# VMID
#
# DEBUG_LEVEL: 0 = ERROR, 1 = WARNING, 2 = INFO, 3 = DEBUG
#*******************************************************************************
#MANAGER_TIMER=30
HOST_MONITORING_INTERVAL = 1
VM_POLLING_INTERVAL = 1
#VM_DIR=/srv/cloud/one/var
SCRIPTS_REMOTE_DIR=/var/tmp/one
PORT=2889
#DB = [ backend = "sqlite" ]
# Sample configuration for MySQL
DB = [ backend = "mysql",
server = "localhost",
port = 0,
user = "oneadmin",
passwd = "oneadmin",
db_name = "onedb_test" ]
VNC_BASE_PORT = 5900
DEBUG_LEVEL=3
#*******************************************************************************
# Physical Networks configuration
#*******************************************************************************
# NETWORK_SIZE: Here you can define the default size for the virtual networks
#
# MAC_PREFIX: Default MAC prefix to be used to create the auto-generated MAC
# addresses is defined here (this can be overridden by the Virtual Network
# template)
#*******************************************************************************
NETWORK_SIZE = 254
MAC_PREFIX = "02:00"
#*******************************************************************************
# Image Repository Configuration
#*******************************************************************************
# DEFAULT_IMAGE_TYPE: This can take values
# OS Image file holding an operating system
# CDROM Image file holding a CDROM
# DATABLOCK Image file holding a datablock,
# always created as an empty block
# DEFAULT_DEVICE_PREFIX: This can be set to
# hd IDE prefix
# sd SCSI
# xvd XEN Virtual Disk
# vd KVM virtual disk
#*******************************************************************************
DEFAULT_IMAGE_TYPE = "OS"
DEFAULT_DEVICE_PREFIX = "hd"
#*******************************************************************************
# Information Driver Configuration
#*******************************************************************************
# You can add more information managers with different configurations but make
# sure they have different names.
#
# name : name for this information manager
#
# executable: path of the information driver executable, can be an
# absolute path or relative to $ONE_LOCATION/lib/mads (or
# /usr/lib/one/mads/ if OpenNebula was installed in /)
#
# arguments : for the driver executable, usually a probe configuration file,
# can be an absolute path or relative to $ONE_LOCATION/etc (or
# /etc/one/ if OpenNebula was installed in /)
#*******************************************************************************
#-------------------------------------------------------------------------------
# KVM Information Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
#-------------------------------------------------------------------------------
#IM_MAD = [
# name = "im_kvm",
# executable = "one_im_ssh",
# arguments = "-r 0 -t 15 kvm" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# XEN Information Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
#-------------------------------------------------------------------------------
#IM_MAD = [
# name = "im_xen",
# executable = "one_im_ssh",
# arguments = "xen" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# EC2 Information Driver Manager Configuration
#-------------------------------------------------------------------------------
#IM_MAD = [
# name = "im_ec2",
# executable = "one_im_ec2",
# arguments = "im_ec2/im_ec2.conf" ]
#-------------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Ganglia Information Driver Manager Configuration
#-----------------------------------------------------------------------------
#IM_MAD = [
# name = "im_ganglia",
# executable = "one_im_sh",
# arguments = "ganglia" ]
#-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Dummy Information Driver Manager Configuration
#-------------------------------------------------------------------------------
IM_MAD = [ name="im_dummy", executable="one_im_dummy"]
#-------------------------------------------------------------------------------
#*******************************************************************************
# Virtualization Driver Configuration
#*******************************************************************************
# You can add more virtualization managers with different configurations but
# make sure they have different names.
#
# name : name of the virtual machine manager driver
#
# executable: path of the virtualization driver executable, can be an
# absolute path or relative to $ONE_LOCATION/lib/mads (or
# /usr/lib/one/mads/ if OpenNebula was installed in /)
#
# arguments : for the driver executable
#
# default : default values and configuration parameters for the driver, can
# be an absolute path or relative to $ONE_LOCATION/etc (or
# /etc/one/ if OpenNebula was installed in /)
#
# type : driver type, supported drivers: xen, kvm, xml
#*******************************************************************************
#-------------------------------------------------------------------------------
# KVM Virtualization Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
# -p name of the poll probe (executed locally)
#-------------------------------------------------------------------------------
#VM_MAD = [
# name = "vmm_kvm",
# executable = "one_vmm_ssh",
# arguments = "-t 15 -r 0 kvm",
# default = "vmm_ssh/vmm_ssh_kvm.conf",
# type = "kvm" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# XEN Virtualization Driver Manager Configuration
# -r number of retries when monitoring a host
# -t number of threads, i.e. number of hosts monitored at the same time
# -l do not perform the VM polling in the node
# -p name of the poll probe (executed locally)
#-------------------------------------------------------------------------------
#VM_MAD = [
# name = "vmm_xen",
# executable = "one_vmm_ssh",
# arguments = "xen",
# default = "vmm_ssh/vmm_ssh_xen.conf",
# type = "xen" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# EC2 Virtualization Driver Manager Configuration
# arguments: default values for the EC2 driver, can be an absolute path or
# relative to $ONE_LOCATION/etc (or /etc/one/ if OpenNebula was
# installed in /).
#-------------------------------------------------------------------------------
#VM_MAD = [
# name = "vmm_ec2",
# executable = "one_vmm_ec2",
# arguments = "vmm_ec2/vmm_ec2.conf",
# type = "xml" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Dummy Virtualization Driver Configuration
#-------------------------------------------------------------------------------
VM_MAD = [ name="vmm_dummy", executable="one_vmm_dummy", type="xml" ]
#-------------------------------------------------------------------------------
#*******************************************************************************
# Transfer Manager Driver Configuration
#*******************************************************************************
# You can add more transfer managers with different configurations but make
# sure they have different names.
# name : name for this transfer driver
#
# executable: path of the transfer driver executable, can be an
# absolute path or relative to $ONE_LOCATION/lib/mads (or
# /usr/lib/one/mads/ if OpenNebula was installed in /)
#
# arguments : for the driver executable, usually a commands configuration file
# , can be an absolute path or relative to $ONE_LOCATION/etc (or
# /etc/one/ if OpenNebula was installed in /)
#*******************************************************************************
#-------------------------------------------------------------------------------
# SHARED Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
# name = "tm_shared",
# executable = "one_tm",
# arguments = "tm_shared/tm_shared.conf" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# SSH Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
# name = "tm_ssh",
# executable = "one_tm",
# arguments = "tm_ssh/tm_ssh.conf" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Dummy Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
TM_MAD = [
name = "tm_dummy",
executable = "one_tm",
arguments = "tm_dummy/tm_dummy.conf" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# LVM Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
# name = "tm_lvm",
# executable = "one_tm",
# arguments = "tm_lvm/tm_lvm.conf" ]
#-------------------------------------------------------------------------------
#*******************************************************************************
# Image Manager Driver Configuration
#*******************************************************************************
# Drivers to manage the image repository, specialized for the storage backend
# executable: path of the image manager driver executable, can be an
# absolute path or relative to $ONE_LOCATION/lib/mads (or
# /usr/lib/one/mads/ if OpenNebula was installed in /)
#
# arguments : for the driver executable
#*******************************************************************************
#-------------------------------------------------------------------------------
# FS based Image Manager Driver Configuration
# -t number of threads, i.e. number of repo operations at the same time
#-------------------------------------------------------------------------------
IMAGE_MAD = [
executable = "one_image",
arguments = "fs -t 15" ]
#-------------------------------------------------------------------------------
#*******************************************************************************
# Hook Manager Configuration
#*******************************************************************************
# The Driver (HM_MAD), used to execute the Hooks
# executable: path of the hook driver executable, can be an
# absolute path or relative to $ONE_LOCATION/lib/mads (or
# /usr/lib/one/mads/ if OpenNebula was installed in /)
#
# arguments : for the driver executable, can be an absolute path or relative
# to $ONE_LOCATION/etc (or /etc/one/ if OpenNebula was installed
# in /)
#
# Virtual Machine Hooks (VM_HOOK) defined by:
# name : for the hook, useful to track the hook (OPTIONAL)
# on : when the hook should be executed,
# - CREATE, when the VM is created (onevm create)
# - RUNNING, after the VM is successfully booted
# - SHUTDOWN, after the VM is shutdown
# - STOP, after the VM is stopped (including VM image transfers)
# - DONE, after the VM is deleted or shutdown
# - FAILED, when the VM enters the failed state
# command : path can be absolute or relative to $ONE_LOCATION/share/hooks
# in case of self-contained installation or relative to
# /usr/share/one/hooks in case of system-wide installation
# arguments : for the hook. You can access VM information with $
# - $VMID, the ID of the virtual machine
# - $TEMPLATE, the VM template in xml and base64 encoded
# remote : values,
# - YES, The hook is executed in the host where the VM was
# allocated
# - NO, The hook is executed in the OpenNebula server (default)
#
#
# Host Hooks (HOST_HOOK) defined by:
# name : for the hook, useful to track the hook (OPTIONAL)
# on : when the hook should be executed,
# - CREATE, when the Host is created (onehost create)
# - ERROR, when the Host enters the error state
# - DISABLE, when the Host is disabled
# command : path can be absolute or relative to $ONE_LOCATION/share/hooks
# in case of self-contained installation or relative to
# /usr/share/one/hooks in case of system-wide installation
# arguments : for the hook. You can use the following Host information:
# - $HID, the ID of the host
# - $TEMPLATE, the Host template in xml and base64 encoded
# remote : values,
# - YES, The hook is executed in the host
# - NO, The hook is executed in the OpenNebula server (default)
#-------------------------------------------------------------------------------
HM_MAD = [
executable = "one_hm" ]
#-------------------------------------------------------------------------------
#------------------------------ Fault Tolerance Hooks --------------------------
# This hook is used to perform recovery actions when a host fails. The VMs
# running in the host can be deleted (use -d option) or resubmitted (-r) in
# other host
# Last argument (force) can be "y", so suspended VMs in the host will be
# resubmitted/deleted, or "n", so suspended VMs in the host will be ignored
#
#HOST_HOOK = [
# name = "error",
# on = "ERROR",
# command = "host_error.rb",
# arguments = "$HID -r n",
# remote = "no" ]
#-------------------------------------------------------------------------------
# These two hooks can be used to automatically delete or resubmit VMs that reach
# the "failed" state. This way, the administrator doesn't have to intervene
# manually to release their resources or retry the deployment.
#
# Only one of them should be uncommented.
#-------------------------------------------------------------------------------
#
#VM_HOOK = [
# name = "on_failure_delete",
# on = "FAILED",
# command = "/usr/bin/env onevm delete",
# arguments = "$VMID" ]
#
#VM_HOOK = [
# name = "on_failure_resubmit",
# on = "FAILED",
# command = "/usr/bin/env onevm resubmit",
# arguments = "$VMID" ]
#-------------------------------------------------------------------------------
#-------------------------------- ebtables Hook---------------------------------
# You can use these two hooks to isolate networks at the ethernet level so the
# traffic generated in different virtual networks can not be seen in others.
#
# All the network configuration will be done in the cluster nodes; these are the
# additional requirements:
# - ebtables package installed
# - sudoers configured so oneadmin can execute ebtables without password
#
# NOTE: Change the first command for ebtables-xen if you are using Xen
#
#VM_HOOK = [
# name = "ebtables-start",
# on = "running",
# command = "ebtables-kvm", # or ebtables-xen
# arguments = "one-$VMID",
# remote = "yes" ]
#
#VM_HOOK = [
# name = "ebtables-flush",
# on = "done",
# command = "ebtables-flush",
# arguments = "",
# remote = "yes" ]
#-------------------------------------------------------------------------------
#*******************************************************************************
# Auth Manager Configuration
#*******************************************************************************
# The Driver (AUTHM_MAD) that will be used to authenticate and authorize
# OpenNebula requests. If not defined OpenNebula will use the built-in auth
# policies
# executable: path of the auth driver executable, can be an
# absolute path or relative to $ONE_LOCATION/lib/mads (or
# /usr/lib/one/mads/ if OpenNebula was installed in /)
#
# arguments : for the driver executable, can be an absolute path or relative
# to $ONE_LOCATION/etc (or /etc/one/ if OpenNebula was installed
# in /)
#-------------------------------------------------------------------------------
#AUTH_MAD = [
# executable = "one_auth_mad" ]

src/onedb/test/test_mysql.sh (new executable file, 173 lines)

@@ -0,0 +1,173 @@
#!/bin/bash
################################################################################
# Initialization
################################################################################
if [ -z $ONE_LOCATION ]; then
echo "ONE_LOCATION not defined."
exit -1
fi
ONEDCONF_LOCATION="$ONE_LOCATION/etc/oned.conf"
if [ -f $ONEDCONF_LOCATION ]; then
echo "$ONEDCONF_LOCATION has to be overwritten, move it to a safe place."
exit -1
fi
VAR_LOCATION="$ONE_LOCATION/var"
# Clean DB
mysql -u oneadmin -poneadmin -h localhost -P 0 -e "DROP DATABASE IF EXISTS onedb_test;"
cp oned_mysql.conf $ONEDCONF_LOCATION
echo "oneadmin:oneadmin" > oneadmin_auth
export ONE_XMLRPC=http://localhost:2889/RPC2
export PATH=$ONE_LOCATION/bin:$PATH
export ONE_AUTH="`pwd`/oneadmin_auth"
PID=$$
mkdir results
################################################################################
# Start OpenNebula and populate a DB
################################################################################
echo "Starting oned, some resources will be created"
oned -f &
sleep 2s;
./create.sh
pkill -P $PID oned
sleep 2s;
pkill -9 -P $PID oned
################################################################################
# Upgrade the 2.2 sample DB, and compare schemas
################################################################################
echo "All resources created, now 2.2 DB will be upgraded."
# dump current DB and schema
onedb backup results/mysqldb.3.0
mysqldump -u oneadmin -poneadmin -h localhost -P 0 --no-data onedb_test > results/mysqldb.3.0.tmpschema
# restore 2.2
onedb restore -f 2.2/mysqldb.sql
# upgrade
onedb upgrade -v --backup results/mysqldb.backup
# dump upgraded DB schema
mysqldump -u oneadmin -poneadmin -h localhost -P 0 --no-data onedb_test > results/mysqldb.upgraded.tmpschema
echo "Done. Upgraded DB and the one just created will be compared."
# Sort the files contents, to avoid false diff errors
sort results/mysqldb.upgraded.tmpschema > results/mysqldb.upgraded.schema
sort results/mysqldb.3.0.tmpschema > results/mysqldb.3.0.schema
rm results/mysqldb.upgraded.tmpschema
rm results/mysqldb.3.0.tmpschema
# Perform a diff
FILE=results/schema.diff
diff <(grep -v -e "Dump completed on" results/mysqldb.upgraded.schema) <(grep -v -e "Dump completed on" results/mysqldb.3.0.schema) > $FILE
if [[ -s $FILE ]] ; then
echo "Error: Schemas do not match. Check file $FILE"
exit -1
fi
################################################################################
# Start oned using the upgraded DB and compare objects XMLs
################################################################################
echo "Schemas match. OpenNebula 3.0 will be started with the upgraded 2.2 DB."
oned -f &
sleep 2s;
for obj in host vnet image vm user; do
for i in 0 1 2 3 4; do
one$obj show -x $i > results/xml_files/$obj-$i-upgraded.xml
done
done
for obj in host vnet image vm acl group user; do
one$obj list a -x > results/xml_files/$obj-pool-upgraded.xml
done
pkill -P $PID oned
sleep 2s;
pkill -9 -P $PID oned
echo "XML output collected. A diff will be performed."
mkdir results/diff_files
diff <(grep -v -e "<LAST_MON_TIME>" -e "<CLUSTER>" -e "NAME>" results/xml_files/host-pool.xml) <(grep -v -e "<LAST_MON_TIME>" -e "<CLUSTER>" -e "NAME>" results/xml_files/host-pool-upgraded.xml) > results/diff_files/host-pool.diff
# TODO: fix
# The image-pool.xml files are the same, but for some reason the Images are
# returned in different order.
#diff <(grep -v -e "<REGTIME>" -e "<SOURCE>" -e "<SIZE>" results/xml_files/image-pool.xml) <(grep -v -e "<REGTIME>" -e "<SOURCE>" -e "<SIZE>" results/xml_files/image-pool-upgraded.xml) > results/diff_files/image-pool.diff
diff <(grep -v -e "<LAST_POLL>" -e "TIME>" -e "<SOURCE>" -e "<TEMPLATE_ID>" -e "<VM_DIR>" results/xml_files/vm-pool.xml) <(grep -v -e "<LAST_POLL>" -e "TIME>" -e "<SOURCE>" -e "<TEMPLATE_ID>" -e "<VM_DIR>" results/xml_files/vm-pool-upgraded.xml) > results/diff_files/vm-pool.diff
for obj in vnet acl group user; do
diff <(cat results/xml_files/$obj-pool.xml) <(cat results/xml_files/$obj-pool-upgraded.xml) > results/diff_files/$obj-pool.diff
done
for i in 0 1 2 3 4; do
diff <(grep -v -e "<LAST_MON_TIME>" -e "<CLUSTER>" -e "NAME>" results/xml_files/host-$i.xml) <(grep -v -e "<LAST_MON_TIME>" -e "<CLUSTER>" -e "NAME>" results/xml_files/host-$i-upgraded.xml) > results/diff_files/host-$i.diff
diff <(cat results/xml_files/vnet-$i.xml) <(cat results/xml_files/vnet-$i-upgraded.xml) > results/diff_files/vnet-$i.diff
diff <(grep -v -e "<REGTIME>" -e "<SOURCE>" -e "<SIZE>" results/xml_files/image-$i.xml) <(grep -v -e "<REGTIME>" -e "<SOURCE>" -e "<SIZE>" results/xml_files/image-$i-upgraded.xml) > results/diff_files/image-$i.diff
diff <(grep -v -e "<LAST_POLL>" -e "TIME>" -e "<SOURCE>" -e "<TEMPLATE_ID>" -e "<VM_DIR>" -e "<NET_TX>" results/xml_files/vm-$i.xml) <(grep -v -e "<LAST_POLL>" -e "TIME>" -e "<SOURCE>" -e "<TEMPLATE_ID>" -e "<VM_DIR>" -e "<NET_TX>" results/xml_files/vm-$i-upgraded.xml) > results/diff_files/vm-$i.diff
diff <(cat results/xml_files/user-$i.xml) <(cat results/xml_files/user-$i-upgraded.xml) > results/diff_files/user-$i.diff
done
CODE=0
for obj in host vnet image vm user; do
for i in 0 1 2 3 4; do
FILE=results/diff_files/$obj-$i.diff
if [[ -s $FILE ]] ; then
echo "Error: diff file $FILE is not empty."
CODE=-1
fi
done
done
for obj in host vnet image vm acl group user; do
FILE=results/diff_files/$obj-pool.diff
if [[ -s $FILE ]] ; then
echo "Error: diff file $FILE is not empty."
CODE=-1
fi
done
if [ $CODE -eq 0 ]; then
echo "Done, all tests passed."
fi
rm oneadmin_auth
exit $CODE
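A sketch of how this test might be launched from a self-contained installation, assuming a local MySQL server prepared as described after the configuration file above (the ONE_LOCATION value is only an example; the script refuses to run if $ONE_LOCATION/etc/oned.conf already exists, and it drops any existing onedb_test database):

export ONE_LOCATION=/srv/cloud/one
cd src/onedb/test
./test_mysql.sh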


@@ -24,7 +24,7 @@ if [ -f $VAR_LOCATION/one.db ]; then
fi
- cp oned.conf $ONEDCONF_LOCATION
+ cp oned_sqlite.conf $ONEDCONF_LOCATION
echo "oneadmin:oneadmin" > oneadmin_auth