
Merge branch 'feature-1112' of git.opennebula.org:one into feature-1112

Ruben S. Montero 2012-03-05 23:49:42 +01:00
commit 13bee9d9da
26 changed files with 436 additions and 253 deletions

View File

@ -91,6 +91,13 @@ public:
*/
int disk_attribute(VectorAttribute * disk);
/**
* Replace template for this object. Object should be updated
* after calling this method
* @param tmpl string representation of the template
*/
int replace_template(const string& tmpl_str, string& error);
private:
// -------------------------------------------------------------------------

View File

@ -373,7 +373,7 @@ public:
* after calling this method
* @param tmpl string representation of the template
*/
int replace_template(const string& tmpl_str, string& error);
virtual int replace_template(const string& tmpl_str, string& error);
/**

View File

@ -877,6 +877,14 @@ private:
*/
int parse_requirements(string& error_str);
/**
* Adds automatic placement requirements: Datastore and Cluster
*
* @param error_str Returns the error reason, if any
* @return 0 on success
*/
int automatic_requirements(string& error_str);
/**
* Parse the "GRAPHICS" attribute and generates a default PORT if not
* defined

View File

@ -27,6 +27,7 @@
#include "UserPool.h"
#include "VMTemplatePool.h"
#include "DatastorePool.h"
#include "ClusterPool.h"
#include "VirtualMachineManager.h"
#include "LifeCycleManager.h"
@ -46,7 +47,8 @@ protected:
NebulaTest():mysql(false), need_host_pool(false), need_vm_pool(false),
need_vnet_pool(false), need_image_pool(false),
need_user_pool(false), need_template_pool(false),
need_group_pool(false),
need_group_pool(false), need_datastore_pool(false),
need_cluster_pool(false),
need_vmm(false),
need_im(false), need_tm(false),
need_lcm(false), need_dm(false),
@ -69,6 +71,7 @@ public:
bool need_template_pool;
bool need_group_pool;
bool need_datastore_pool;
bool need_cluster_pool;
bool need_vmm;
bool need_im;
@ -111,6 +114,8 @@ public:
virtual DatastorePool* create_dspool(SqlDB* db);
virtual ClusterPool* create_clpool(SqlDB* db);
// ------------------------------------------------------------------------
// Managers
// ------------------------------------------------------------------------

View File

@ -529,8 +529,6 @@ INSTALL_ETC_FILES=(
VMWARE_ETC_FILES:$ETC_LOCATION
VMM_EC2_ETC_FILES:$ETC_LOCATION/vmm_ec2
VMM_EXEC_ETC_FILES:$ETC_LOCATION/vmm_exec
DATASTORE_DRIVER_FS_ETC_FILES:$ETC_LOCATION/datastore/
DATASTORE_DRIVER_VMWARE_ETC_FILES:$ETC_LOCATION/datastore/
IM_EC2_ETC_FILES:$ETC_LOCATION/im_ec2
TM_LVM_ETC_FILES:$ETC_LOCATION/tm/
HM_ETC_FILES:$ETC_LOCATION/hm
@ -808,21 +806,15 @@ TM_VMWARE_FILES="src/tm_mad/vmware/clone \
# - VMware based Image Repository, $REMOTES_LOCATION/datastore/vmware
#-------------------------------------------------------------------------------
DATASTORE_DRIVER_FS_ETC_FILES="src/datastore_mad/remotes/fs/fs.conf"
DATASTORE_DRIVER_VMWARE_ETC_FILES="src/datastore_mad/remotes/vmware/vmware.conf"
DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
src/datastore_mad/remotes/libfs.sh"
DATASTORE_DRIVER_FS_SCRIPTS="src/datastore_mad/remotes/fs/cp \
src/datastore_mad/remotes/fs/mkfs \
src/datastore_mad/remotes/fs/mv \
src/datastore_mad/remotes/fs/rm"
DATASTORE_DRIVER_VMWARE_SCRIPTS="src/datastore_mad/remotes/vmware/cp \
src/datastore_mad/remotes/vmware/mkfs \
src/datastore_mad/remotes/vmware/mv \
src/datastore_mad/remotes/vmware/rm"
#-------------------------------------------------------------------------------

View File

@ -71,6 +71,7 @@ class OneDatastoreHelper < OpenNebulaHelper::OneHelper
puts str % ["CLUSTER", datastore['CLUSTER']]
puts str % ["TYPE", datastore['TYPE']]
puts str % ["TM_MAD", datastore['TM_MAD']]
puts str % ["BASE PATH",datastore['BASE_PATH']]
puts

View File

@ -72,6 +72,14 @@ int Datastore::disk_attribute(VectorAttribute * disk)
disk->replace("DATASTORE_ID", oss.str());
disk->replace("TM_MAD", get_tm_mad());
if ( get_cluster_id() != ClusterPool::NONE_CLUSTER_ID )
{
oss.str("");
oss << get_cluster_id();
disk->replace("CLUSTER_ID", oss.str());
}
return 0;
}
@ -96,14 +104,14 @@ int Datastore::insert(SqlDB *db, string& error_str)
erase_template_attribute("NAME", name);
// NAME is checked in DatastorePool::allocate
erase_template_attribute("TYPE", type);
get_template_attribute("TYPE", type);
if ( type.empty() == true )
{
goto error_type;
}
erase_template_attribute("TM_MAD", tm_mad);
get_template_attribute("TM_MAD", tm_mad);
if ( tm_mad.empty() == true )
{
@ -316,6 +324,40 @@ int Datastore::from_xml(const string& xml)
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Datastore::replace_template(const string& tmpl_str, string& error)
{
string new_type;
string new_tm_mad;
int rc;
rc = PoolObjectSQL::replace_template(tmpl_str, error);
if ( rc != 0 )
{
return rc;
}
get_template_attribute("TYPE", new_type);
if ( !new_type.empty() )
{
type = new_type;
}
get_template_attribute("TM_MAD", new_tm_mad);
if ( !new_tm_mad.empty() )
{
tm_mad = new_tm_mad;
}
return 0;
}
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
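The override above keeps the cached type and tm_mad members in sync whenever the datastore's template is replaced. A minimal client-side sketch of the operation that exercises it, assuming the Ruby OCA Datastore#update call and a hypothetical datastore ID 103 (not part of this commit):

require 'OpenNebula'                          # OCA load path assumed
include OpenNebula

client = Client.new                           # credentials taken from ONE_AUTH
ds     = Datastore.new_with_id(103, client)   # 103 is a hypothetical ID

ds.info
rc = ds.update("TM_MAD = \"ssh\"")            # served by Datastore::replace_template on oned's side
puts rc.message if OpenNebula.is_error?(rc)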

View File

@ -39,10 +39,25 @@ source ${DRIVER_PATH}/../libfs.sh
DRV_ACTION=$1
ID=$2
set_up_datastore $DRV_ACTION
XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"
SRC=`$XPATH /DS_DRIVER_ACTION_DATA/IMAGE/PATH`
unset i XPATH_ELEMENTS
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
/DS_DRIVER_ACTION_DATA/IMAGE/PATH)
BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
SRC="${XPATH_ELEMENTS[4]}"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"
DST=`generate_image_path`
@ -53,8 +68,6 @@ http://*)
log "Downloading $SRC to the image repository"
exec_and_log "$WGET -O $DST $SRC" "Error downloading $SRC"
exec_and_log "chmod 0660 $DST"
;;
*)
@ -67,8 +80,6 @@ http://*)
log "Copying local image $SRC to the image repository"
exec_and_log "cp -f $SRC $DST" "Error copying $SRC to $DST"
exec_and_log "chmod 0660 $DST"
;;
esac

View File

@ -39,20 +39,32 @@ source ${DRIVER_PATH}/../libfs.sh
DRV_ACTION=$1
ID=$2
set_up_datastore $DRV_ACTION
XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"
unset i XPATH_ELEMENTS
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
/DS_DRIVER_ACTION_DATA/IMAGE/PATH \
/DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \
/DS_DRIVER_ACTION_DATA/IMAGE/SIZE)
FSTYPE="${XPATH_ELEMENTS[0]}"
SIZE="${XPATH_ELEMENTS[1]}"
BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
SRC="${XPATH_ELEMENTS[4]}"
FSTYPE="${XPATH_ELEMENTS[5]}"
SIZE="${XPATH_ELEMENTS[6]}"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"
DST=`generate_image_path`
# ------------ Create the image to the repository ------------
MKFS_CMD=`mkfs_command $DST $FSTYPE`
@ -61,7 +73,6 @@ exec_and_log "$DD if=/dev/zero of=$DST bs=1 count=1 seek=${SIZE}M" \
"Could not create image $DST"
exec_and_log "$MKFS_CMD" \
"Unable to create filesystem $FSTYPE in $DST"
exec_and_log "chmod 0660 $DST"
# ---------------- Get the size of the image ------------
SIZE=`fs_du $DST`

View File

@ -1,75 +0,0 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
###############################################################################
# This script is used to move a VM image (SRC) to the image repository as DST
# Several SRC types are supported
###############################################################################
# ------------ Set up the environment to source common tools ------------
if [ -z "${ONE_LOCATION}" ]; then
LIB_LOCATION=/usr/lib/one
else
LIB_LOCATION=$ONE_LOCATION/lib
fi
. $LIB_LOCATION/sh/scripts_common.sh
source $(dirname $0)/fsrc
SRC=$1
DST=$2
ID=$3
# ------------ Generate a filename for the image ------------
if [ "$DST" = "-" ] ; then
DST=`generate_image_path`
fi
# ------------ Move the image to the repository ------------
case $SRC in
http://*)
log "Downloading $SRC to the image repository"
exec_and_log "$WGET -O $DST $SRC" \
"Error downloading $SRC"
;;
*)
log "Moving local image $SRC to the image repository"
if [ \( -L $SRC \) -a \
\( "`$READLINK -f $SRC`" = "`$READLINK -f $DST`" \) ] ; then
log "Not moving files to image repo, they are the same"
else
exec_and_log "mv -f $SRC $DST" "Could not move $SRC to $DST"
fi
;;
esac
if [ -d $DST ]; then
exec_and_log "chmod 0770 $DST"
else
exec_and_log "chmod 0660 $DST"
fi
# ---------------- Get the size of the image ------------
SIZE=`fs_du $DST`
echo "$DST $SIZE"

View File

@ -17,8 +17,11 @@
#--------------------------------------------------------------------------- #
#------------------------------------------------------------------------------
# Set up environment variables
# @param $1 - template (base 64 encoded) with driver data
# Set up environment variables
# @param $1 - Datastore base_path
# @param $2 - Restricted directories
# @param $3 - Safe dirs
# @param $4 - Umask for new file creation (default: 0007)
# @return sets the following environment variables
# - RESTRICTED_DIRS: Paths that can not be used to register images
# - SAFE_DIRS: Paths that are safe to specify image paths
@ -28,6 +31,11 @@ function set_up_datastore {
#
# Load the default configuration for FS datastores
#
BASE_PATH="$1"
RESTRICTED_DIRS="$2"
SAFE_DIRS="$3"
UMASK="$4"
if [ -z "${ONE_LOCATION}" ]; then
VAR_LOCATION=/var/lib/one/
ETC_LOCATION=/etc/one/
@ -36,24 +44,6 @@ function set_up_datastore {
ETC_LOCATION=$ONE_LOCATION/etc/
fi
CONF_FILE=$ETC_LOCATION/datastore/fs.conf
source $CONF_FILE
#
# Load attributes from the Datastore
#
XPATH="$VAR_LOCATION/remotes/datastore/xpath.rb -b $1"
eval "DS_BASE_PATH=`$XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH`"
if [ -z "${DS_BASE_PATH}" ]; then
if [ -z "${BASE_PATH}" ]; then
BASE_PATH="${VAR_LOCATION}/images"
fi
else
BASE_PATH=${DS_BASE_PATH}
fi
#
# RESTRICTED AND SAFE DIRS (from default configuration)
#
@ -62,6 +52,14 @@ function set_up_datastore {
export BASE_PATH
export RESTRICTED_DIRS
export SAFE_DIRS
mkdir -p $BASE_PATH
if [ -n "$UMASK" ]; then
umask $UMASK
else
umask 0007
fi
}
#-------------------------------------------------------------------------------
@ -102,6 +100,22 @@ function fs_du {
echo "$SIZE"
}
#-------------------------------------------------------------------------------
# Computes the virtual size of an image as reported by qemu-img
# @param $1 - Path to the image
# @return virtual size of the image in MB
#-------------------------------------------------------------------------------
function qemu_size {
DISK="$1"
SIZE=`$QEMU_IMG info $DISK|grep "^virtual size:"|\
sed 's/^.*(\([0-9]\+\) bytes.*$/\1/g'`
SIZE=$(($SIZE/1048576))
echo "$SIZE"
}
#-------------------------------------------------------------------------------
# Checks if a path is safe for copying the image from
# @param $1 - Path to the image

View File

@ -39,10 +39,25 @@ source ${DRIVER_PATH}/../libfs.sh
DRV_ACTION=$1
ID=$2
set_up_datastore $DRV_ACTION
XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"
SRC=`$XPATH /DS_DRIVER_ACTION_DATA/IMAGE/PATH`
unset i XPATH_ELEMENTS
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
/DS_DRIVER_ACTION_DATA/IMAGE/PATH)
BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
SRC="${XPATH_ELEMENTS[4]}"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"
DST=`generate_image_path`
@ -66,8 +81,6 @@ case $SRC in
exec_and_log "mv -f $DST/$BASE_DISK_FILE $DST/disk.vmdk" \
"Error renaming disk file $BASE_DISK_FILE to disk.vmdk"
fi
exec_and_log "chmod 0770 $DST"
;;
esac

View File

@ -39,19 +39,29 @@ source ${DRIVER_PATH}/../libfs.sh
DRV_ACTION=$1
ID=$2
set_up_datastore $DRV_ACTION
XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"
unset i XPATH_ELEMENTS
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
/DS_DRIVER_ACTION_DATA/IMAGE/PATH \
/DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \
/DS_DRIVER_ACTION_DATA/IMAGE/SIZE)
FSTYPE="${XPATH_ELEMENTS[0]}"
SIZE="${XPATH_ELEMENTS[1]}"
BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
SRC="${XPATH_ELEMENTS[4]}"
FSTYPE="${XPATH_ELEMENTS[5]}"
SIZE="${XPATH_ELEMENTS[6]}"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"
DST=`generate_image_path`
@ -60,7 +70,6 @@ DISK_TMP=$DISK.tmp
IMAGE_FORMAT=vmdk
umask 0007
# ------------ Create the image to the repository ------------
MKFS_CMD=`mkfs_command $DISK_TMP $FSTYPE`
@ -75,13 +84,9 @@ exec_and_log "$QEMU_IMG convert -O $IMAGE_FORMAT $DISK_TMP $DISK" \
"Unable to convert to $IMAGE_FORMAT in $DISK_TMP"
exec_and_log "rm -f $DISK_TMP" \
"Unable to remove temporary disk $DISK_TMP"
exec_and_log "chmod 0660 $DISK"
# ---------------- Get the size of the image ------------
SIZE=`$QEMU_IMG info $DISK|grep "^virtual size:"|\
sed 's/^.*(\([0-9]\+\) bytes.*$/\1/g'`
SIZE=$(($SIZE/1048576))
SIZE=`qemu_size $DISK`
echo "$DST $SIZE"

View File

@ -1,75 +0,0 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
###############################################################################
# This script is used to move a VM image (SRC) to the image repository as DST
# Several SRC types are supported
###############################################################################
# ------------ Set up the environment to source common tools ------------
if [ -z "${ONE_LOCATION}" ]; then
LIB_LOCATION=/usr/lib/one
else
LIB_LOCATION=$ONE_LOCATION/lib
fi
. $LIB_LOCATION/sh/scripts_common.sh
source $(dirname $0)/fsrc
SRC=$1
DST=$2
ID=$3
# ------------ Generate a filename for the image ------------
if [ "$DST" = "-" ] ; then
DST=`generate_image_path`
fi
# ------------ Move the image to the repository ------------
case $SRC in
http://*)
log "Downloading $SRC to the image repository"
exec_and_log "$WGET -O $DST $SRC" \
"Error downloading $SRC"
;;
*)
log "Moving local image $SRC to the image repository"
if [ \( -L $SRC \) -a \
\( "`$READLINK -f $SRC`" = "`$READLINK -f $DST`" \) ] ; then
log "Not moving files to image repo, they are the same"
else
exec_and_log "mv -f $SRC $DST" "Could not move $SRC to $DST"
fi
;;
esac
if [ -d $DST ]; then
exec_and_log "chmod 0770 $DST"
else
exec_and_log "chmod 0660 $DST"
fi
# ---------------- Get the size of the image ------------
SIZE=`fs_du $DST`
echo "$DST $SIZE"

View File

@ -47,7 +47,7 @@ xml = REXML::Document.new(tmp).root
ARGV.each do |xpath|
element = xml.elements[xpath]
values << element.text if !element.nil?
values << element.text.to_s if !element.nil?
values << "\0"
end
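The to_s added above matters because REXML's Element#text returns nil when the matched element exists but has no text; converting to a string keeps an empty field in the "\0"-separated output instead of a nil. A minimal sketch of the behaviour outside the driver:

require 'rexml/document'

xml = REXML::Document.new('<DATASTORE><UMASK/></DATASTORE>').root

xml.elements['UMASK'].text        # => nil (element present, no text)
xml.elements['UMASK'].text.to_s   # => ""  (what the driver scripts now receive)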

View File

@ -14,25 +14,17 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
# PRESERVE BASH SYNTAX
#*******************************************************************************
# DEFAULT Configuration File for File-System based Datastores
#-------------------------------------------------------------------------------
# BASE_PATH: Path where the images will be stored. If not defined
# defaults to /var/lib/one/images or $ONE_LOCATION/var/images
#
# RESTRICTED_DIRS: Paths that can not be used to register images. A space
# separated list of paths. This prevents users from accessing important files
# like oned.db or /etc/shadow. OpenNebula will automatically add its
# configuration dirs: /var/lib/one, /etc/one and oneadmin's home ($HOME).
#
# SAFE_DIRS: Paths that are safe to specify image paths. A space separated list
# of paths. This will allow you to open specific paths within RESTRICTED_DIRS
#*******************************************************************************
#BASE_PATH=/var/lib/one/images
RESTRICTED_DIRS="/etc/"
SAFE_DIRS="$HOME/public/"

module OZones
    class AggregatedClusters < AggregatedPool
        def initialize
            super("ZONE_POOL")
        end
        def factory(client)
            OpenNebulaJSON::ClusterPoolJSON.new(client)
        end
    end
end

View File

@ -14,25 +14,17 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
# PRESERVE BASH SYNTAX
#*******************************************************************************
# DEFAULT Configuration File for File-System based Datastores
#-------------------------------------------------------------------------------
# BASE_PATH: Path where the images will be stored. If not defined
# defaults to /var/lib/one/images or $ONE_LOCATION/var/images
#
# RESTRICTED_DIRS: Paths that can not be used to register images. A space
# separated list of paths. This prevents users from accessing important files
# like oned.db or /etc/shadow. OpenNebula will automatically add its
# configuration dirs: /var/lib/one, /etc/one and oneadmin's home ($HOME).
#
# SAFE_DIRS: Paths that are safe to specify image paths. A space separated list
# of paths. This will allow you to open specific paths within RESTRICTED_DIRS
#*******************************************************************************
#BASE_PATH=/var/lib/one/images
RESTRICTED_DIRS="/etc/"
SAFE_DIRS="$HOME/public/"

module OZones
    class AggregatedDatastores < AggregatedPool
        def initialize
            super("ZONE_POOL")
        end
        def factory(client)
            OpenNebulaJSON::DatastorePoolJSON.new(client)
        end
    end
end

View File

@ -0,0 +1,45 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'OpenNebulaJSON/JSONUtils'
module OpenNebulaJSON
class ClusterJSON < OpenNebula::Cluster
include JSONUtils
def create(template_json)
cluster_hash = parse_json(template_json, 'cluster')
if OpenNebula.is_error?(cluster_hash)
return cluster_hash
end
self.allocate(cluster_hash['name'])
end
def perform_action(template_json)
action_hash = parse_json(template_json, 'action')
if OpenNebula.is_error?(action_hash)
return action_hash
end
error_msg = "#{action_hash['perform']} action not " <<
" available for this resource"
OpenNebula::Error.new(error_msg)
end
end
end

View File

@ -0,0 +1,50 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'OpenNebulaJSON/JSONUtils'
module OpenNebulaJSON
class DatastoreJSON < OpenNebula::Datastore
include JSONUtils
def create(template_json, cluster_id=ClusterPool::NONE_CLUSTER_ID)
datastore_hash = parse_json(template_json, 'datastore')
if OpenNebula.is_error?(datastore_hash)
return datastore_hash
end
if datastore_hash['datastore_raw']
template = datastore_hash['datastore_raw']
else
template = template_to_str(datastore_hash)
end
self.allocate(template,cluster_id)
end
def perform_action(template_json)
action_hash = parse_json(template_json, 'action')
if OpenNebula.is_error?(action_hash)
return action_hash
end
error_msg = "#{action_hash['perform']} action not " <<
" available for this resource"
OpenNebula::Error.new(error_msg)
end
end
end
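A hedged usage sketch of the class above (the client, datastore name and cluster ID below are assumptions for illustration, not part of this commit):

require 'OpenNebula'                       # OCA load path assumed
require 'OpenNebulaJSON/DatastoreJSON'     # file added by this commit; load path assumed

client = OpenNebula::Client.new            # credentials taken from ONE_AUTH
ds = OpenNebulaJSON::DatastoreJSON.new(OpenNebula::Datastore.build_xml, client)

# JSON template as Sunstone would post it; 100 is a hypothetical cluster ID
rc = ds.create('{"datastore": {"name": "nfs_ds", "tm_mad": "shared"}}', 100)
puts rc.message if OpenNebula.is_error?(rc)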

View File

@ -92,6 +92,11 @@ void Nebula::start()
delete dspool;
}
if ( clpool != 0)
{
delete clpool;
}
if ( vmm != 0)
{
delete vmm;
@ -190,6 +195,7 @@ void Nebula::start()
GroupPool::bootstrap(db);
AclManager::bootstrap(db);
DatastorePool::bootstrap(db);
ClusterPool::bootstrap(db);
}
catch (exception&)
{
@ -205,6 +211,11 @@ void Nebula::start()
string default_image_type = "OS";
string default_device_prefix = "hd";
if (tester->need_cluster_pool)
{
clpool = tester->create_clpool(db);
}
if (tester->need_vm_pool)
{
vmpool = tester->create_vmpool(db,hook_location,var_location);

View File

@ -67,6 +67,11 @@ DatastorePool* NebulaTest::create_dspool(SqlDB* db)
return new DatastorePool(db);
}
ClusterPool* NebulaTest::create_clpool(SqlDB* db)
{
return new ClusterPool(db);
}
// -----------------------------------------------------------
// Managers
// -----------------------------------------------------------

View File

@ -289,6 +289,13 @@ int VirtualMachine::insert(SqlDB * db, string& error_str)
goto error_requirements;
}
rc = automatic_requirements(error_str);
if ( rc != 0 )
{
goto error_requirements;
}
parse_graphics();
// ------------------------------------------------------------------------
@ -519,6 +526,113 @@ int VirtualMachine::parse_requirements(string& error_str)
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
int VirtualMachine::automatic_requirements(string& error_str)
{
int num_vatts;
vector<Attribute * > v_attributes;
VectorAttribute * vatt;
ostringstream oss;
string requirements;
string cluster_id;
bool error = false;
oss << "Incompatible cluster IDs.";
// Get cluster id from all DISK vector attributes
num_vatts = obj_template->get("DISK",v_attributes);
for(int i=0; i<num_vatts; i++)
{
vatt = dynamic_cast<VectorAttribute * >(v_attributes[i]);
if ( vatt == 0 )
{
continue;
}
string vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
if ( !vatt_cluster_id.empty() )
{
oss << endl << "DISK [" << i << "]: IMAGE ["
<< vatt->vector_value("IMAGE_ID") << "] from DATASTORE ["
<< vatt->vector_value("DATASTORE_ID") << "] requires CLUSTER ["
<< vatt_cluster_id << "]";
if ( cluster_id.empty() )
{
cluster_id = vatt_cluster_id;
}
else if ( cluster_id != vatt_cluster_id )
{
error = true;
}
}
}
// Get cluster id from all NIC vector attributes
v_attributes.clear();
num_vatts = obj_template->get("NIC",v_attributes);
for(int i=0; i<num_vatts; i++)
{
vatt = dynamic_cast<VectorAttribute * >(v_attributes[i]);
if ( vatt == 0 )
{
continue;
}
string vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
if ( !vatt_cluster_id.empty() )
{
oss << endl << "NIC [" << i << "]: NETWORK ["
<< vatt->vector_value("NETWORK_ID") << "] requires CLUSTER ["
<< vatt_cluster_id << "]";
if ( cluster_id.empty() )
{
cluster_id = vatt_cluster_id;
}
else if ( cluster_id != vatt_cluster_id )
{
error = true;
}
}
}
if ( error == true )
{
error_str = oss.str();
return -1;
}
if ( !cluster_id.empty() )
{
oss.str("");
oss << "CLUSTER_ID = " << cluster_id;
obj_template->get("REQUIREMENTS", requirements);
if ( !requirements.empty() )
{
oss << " & ( " << requirements << " )";
}
replace_template_attribute("REQUIREMENTS", oss.str());
}
return 0;
}
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
int VirtualMachine::insert_replace(SqlDB *db, bool replace, string& error_str)
{
ostringstream oss;
@ -1466,4 +1580,4 @@ string VirtualMachine::get_system_dir() const
oss << nd.get_ds_location() << DatastorePool::SYSTEM_DS_ID << "/"<< oid;
return oss.str();
};
};
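Taken together with the CLUSTER_ID that Datastore::disk_attribute and VirtualNetwork::nic_attribute now inject into DISK and NIC, the net effect of automatic_requirements() is that the scheduler only considers hosts of the cluster the VM's resources belong to. A hypothetical before/after of the VM template attribute (cluster 100 and the user expression are made up for illustration):

# user-supplied
REQUIREMENTS = "HYPERVISOR = \"kvm\""

# after VirtualMachine::automatic_requirements(), with a DISK or NIC from cluster 100
REQUIREMENTS = "CLUSTER_ID = 100 & ( HYPERVISOR = \"kvm\" )"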

View File

@ -29,6 +29,8 @@ env.Prepend(LIBS=[
'nebula_xml',
'nebula_image',
'nebula_datastore',
'nebula_cluster',
'nebula_um',
'nebula_mad',
'nebula_common',
'nebula_log',

View File

@ -22,6 +22,8 @@ env.Prepend(LIBS=[
'nebula_vm',
'nebula_vmtemplate',
'nebula_hm',
'nebula_cluster',
'nebula_datastore',
'nebula_vnm',
'nebula_authm',
'nebula_acl',

View File

@ -24,6 +24,7 @@
#include "FixedLeases.h"
#include "AuthManager.h"
#include "ClusterPool.h"
#define TO_UPPER(S) transform(S.begin(),S.end(),S.begin(),(int(*)(int))toupper)
@ -626,10 +627,10 @@ int VirtualNetwork::nic_attribute(VectorAttribute *nic, int vid)
string ip;
string mac;
ostringstream vnid;
ostringstream oss;
ip = nic->vector_value("IP");
vnid << oid;
oss << oid;
//--------------------------------------------------------------------------
// GET NETWORK LEASE
@ -654,7 +655,7 @@ int VirtualNetwork::nic_attribute(VectorAttribute *nic, int vid)
//--------------------------------------------------------------------------
nic->replace("NETWORK" ,name);
nic->replace("NETWORK_ID",vnid.str());
nic->replace("NETWORK_ID",oss.str());
nic->replace("BRIDGE" ,bridge);
nic->replace("MAC" ,mac);
nic->replace("IP" ,ip);
@ -678,6 +679,14 @@ int VirtualNetwork::nic_attribute(VectorAttribute *nic, int vid)
nic->replace("VLAN_ID", vlan_id);
}
if ( get_cluster_id() != ClusterPool::NONE_CLUSTER_ID )
{
oss.str("");
oss << get_cluster_id();
nic->replace("CLUSTER_ID", oss.str());
}
return 0;
}

View File

@ -125,7 +125,9 @@ public:
if( rc == 0 )
{
return VirtualNetworkPool::allocate(uid, 0,"the_user","oneadmin", vn_template, oid, err);
return VirtualNetworkPool::allocate(uid, 0,"the_user","oneadmin",
vn_template, oid, ClusterPool::NONE_CLUSTER_ID,
ClusterPool::NONE_CLUSTER_NAME, err);
}
else
{