mirror of https://github.com/OpenNebula/one.git (synced 2025-01-26 10:03:37 +03:00)

Commit c61159fb30: Merge remote-tracking branch 'upstream/feature-1112' into feature-1112
@ -839,6 +839,7 @@ ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \
src/onedb/3.1.80_to_3.2.0.rb \
src/onedb/3.2.0_to_3.2.1.rb \
src/onedb/3.2.1_to_3.3.0.rb \
src/onedb/3.3.0_to_3.3.80.rb \
src/onedb/onedb.rb \
src/onedb/onedb_backend.rb"
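The hunk above registers the new 3.3.0_to_3.3.80.rb migrator with the installer; once installed it is picked up by the onedb tool. A minimal sketch of driving the upgrade from the shell, assuming a default self-contained install with the SQLite backend (the database path and the manual backup step are assumptions, not part of this commit):

#!/bin/bash
# Hedged sketch: stop the daemon, keep a manual copy of the database,
# and let onedb walk the migrator chain up to 3.3.80.
set -e

DB=/var/lib/one/one.db                # assumed SQLite location

one stop                              # oned should be stopped before upgrading

cp "$DB" "$DB.$(date +%Y%m%d).bak"    # extra safety copy

onedb upgrade -v --sqlite "$DB"       # -v prints each migrator as it runs

one start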
@ -429,7 +429,7 @@ void AuthManager::notify_request(int auth_id,bool result,const string& message)
void AuthManager::load_mads(int uid)
{
ostringstream oss;
const VectorAttribute * vattr;
const VectorAttribute * vattr = 0;
int rc;
string name;
AuthManagerDriver * authm_driver = 0;
@ -438,7 +438,10 @@ void AuthManager::load_mads(int uid)

NebulaLog::log("AuM",Log::INFO,oss);

vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
if ( mad_conf.size() > 0 )
{
vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
}

if ( vattr == 0 )
{
@ -34,7 +34,7 @@ class OneUserHelper < OpenNebulaHelper::OneHelper
begin
password = File.read(arg).split("\n").first
rescue
return -1, "Can not read file: #{arg}"
return -1, "Cannot read file: #{arg}"
end
else
password = arg.dup

@ -53,7 +53,7 @@ setup()
start()
{
if [ ! -f "$ECONE_SERVER" ]; then
echo "Can not find $ECONE_SERVER."
echo "Cannot find $ECONE_SERVER."
exit 1
fi

@ -53,7 +53,7 @@ setup()
start()
{
if [ ! -x "$OCCI_SERVER" ]; then
echo "Can not find $OCCI_SERVER."
echo "Cannot find $OCCI_SERVER."
exit 1
fi
@ -131,7 +131,7 @@ module OCCIClient
file_path="/"+m[1]
end
elsif !image_info.elements['TYPE'] == "DATABLOCK"
return CloudClient::Error.new("Can not find URL")
return CloudClient::Error.new("Cannot find URL")
end

if curb
@ -316,7 +316,7 @@ module OCCIClient
end

if info.elements['ID'] == nil
return CloudClient::Error.new("Can not find RESOURCE ID")
return CloudClient::Error.new("Cannot find RESOURCE ID")
end

resource_id = info.elements['ID'].text
@ -79,8 +79,9 @@ let TID=ID+BASE_TID
REGISTER_CMD=$(cat <<EOF
set -e
$SUDO $LVCREATE -L${SIZE}M ${VG_NAME} -n ${LV_NAME}
$SUDO $(iscsi_target_new "$TID" "$IQN")
$SUDO $(iscsi_logicalunit_new "$TID" "$DEV")
$SUDO $(tgtadm_target_new "$TID" "$IQN")
$SUDO $(tgtadm_target_bind_all "$TID")
$SUDO $(tgtadm_logicalunit_new "$TID" "$DEV")
EOF
)

@ -80,8 +80,9 @@ let TID=ID+BASE_TID
REGISTER_CMD=$(cat <<EOF
set -e
$SUDO $LVCREATE -L${SIZE}M ${VG_NAME} -n ${LV_NAME}
$SUDO $(iscsi_target_new "$TID" "$IQN")
$SUDO $(iscsi_logicalunit_new "$TID" "$DEV")
$SUDO $(tgtadm_target_new "$TID" "$IQN")
$SUDO $(tgtadm_target_bind_all "$TID")
$SUDO $(tgtadm_logicalunit_new "$TID" "$DEV")
$SUDO $(mkfs_command "$DEV" "$FSTYPE")
EOF
)

@ -61,15 +61,8 @@ DEV="/dev/$VG_NAME/$LV_NAME"
let TID=ID+BASE_TID

RM_COMMAND=$(cat <<EOF
n_snapshots=\$($SUDO $LVS --noheadings -o name,origin $VG_NAME | \
$AWK '{if(\$2 == "$LV_NAME"){print \$1}}'|wc -l)
if [ "\$n_snapshots" = "0" ]; then
$SUDO $(iscsi_target_delete "$TID")
$SUDO $LVREMOVE -f $VG_NAME/$LV_NAME
else
echo "Error: \$n_snapshots active snapshots for $VG_NAME/$LV_NAME" >&2
exit 1
fi
$SUDO $(tgtadm_target_delete "$TID")
$SUDO $LVREMOVE -f $VG_NAME/$LV_NAME
EOF
)
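The old RM_COMMAND above guards lvremove with a snapshot count taken from lvs/awk; the new version drops that guard and simply deletes the target and the logical volume. A standalone sketch of the counting idiom, assuming the LVM tools are installed and using placeholder VG/LV names:

#!/bin/bash
# Hedged sketch of the snapshot guard used by the old RM_COMMAND:
# refuse to remove a logical volume while it still has snapshots.
VG_NAME=${1:-vg-one}          # placeholder volume group
LV_NAME=${2:-lv-one-0}        # placeholder logical volume

# count LVs whose snapshot origin is our LV
n_snapshots=$(sudo lvs --noheadings -o name,origin "$VG_NAME" | \
              awk -v lv="$LV_NAME" '$2 == lv {print $1}' | wc -l)

if [ "$n_snapshots" = "0" ]; then
    sudo lvremove -f "$VG_NAME/$LV_NAME"
else
    echo "Error: $n_snapshots active snapshots for $VG_NAME/$LV_NAME" >&2
    exit 1
fi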
@ -23,7 +23,7 @@
# @param $3 - Safe dirs
# @param $4 - Umask for new file creation (default: 0007)
# @return sets the following environment variables
# - RESTRICTED_DIRS: Paths that can not be used to register images
# - RESTRICTED_DIRS: Paths that cannot be used to register images
# - SAFE_DIRS: Paths that are safe to specify image paths
# - BASE_PATH: Path where the images will be stored
#------------------------------------------------------------------------------

@ -136,37 +136,3 @@ function check_restricted {

echo 0
}

# ------------------------------------------------------------------------------
# iSCSI functions
# ------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# Returns the command to create a new target
# @param $1 - ID of the image
# @param $2 - Target Host
# @param $3 - Device
# @return the command to create a new target
#-------------------------------------------------------------------------------

function iscsi_target_new {
ID="$1"
IQN="$2"

echo "$TGTADM --lld iscsi --op new --mode target --tid $ID "\
"--targetname $IQN"
}

function iscsi_logicalunit_new {
ID="$1"
DEV="$2"

echo "$TGTADM --lld iscsi --op new --mode logicalunit --tid $ID "\
"--lun 1 --backing-store $DEV"
}

function iscsi_target_delete {
ID="$1"
echo "$TGTADM --lld iscsi --op delete --mode target --tid $ID"
}
@ -664,7 +664,7 @@ int DispatchManager::resubmit(int vid)
{
case VirtualMachine::SUSPENDED:
NebulaLog::log("DiM",Log::ERROR,
"Can not resubmit a suspended VM. Resume it first");
"Cannot resubmit a suspended VM. Resume it first");
rc = -2;
break;

@ -688,7 +688,7 @@ int DispatchManager::resubmit(int vid)
break;
case VirtualMachine::DONE:
NebulaLog::log("DiM",Log::ERROR,
"Can not resubmit a VM already in DONE state");
"Cannot resubmit a VM already in DONE state");
rc = -2;
break;
}

@ -22,7 +22,7 @@

/* -------------------------------------------------------------------------- */
/* There are two default groups boostrapped by the core: */
/* - oneadmin can not be removed */
/* - oneadmin cannot be removed */
/* - users to place regular users by default */
/* The first 100 group IDs are reserved for system groups. Regular ones start */
/* from ID 100 */
@ -49,12 +49,15 @@ void HookManager::load_mads(int uid)
{
HookManagerDriver * hm_mad;
ostringstream oss;
const VectorAttribute * vattr;
const VectorAttribute * vattr = 0;
int rc;

NebulaLog::log("HKM",Log::INFO,"Loading Hook Manager driver.");

vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
if ( mad_conf.size() > 0 )
{
vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
}

if ( vattr == 0 )
{

@ -267,7 +267,7 @@ public:
CPPUNIT_ASSERT( oid_1 == -1 );
CPPUNIT_ASSERT( rc == oid_1 );

// the hostname can not be repeated if the drivers change
// the hostname cannot be repeated if the drivers change
rc = hp->allocate(&oid_1,
names[0],
im_mad_2,

@ -50,12 +50,15 @@ void ImageManager::load_mads(int uid)
{
ImageManagerDriver * imagem_mad;
ostringstream oss;
const VectorAttribute * vattr;
const VectorAttribute * vattr = 0;
int rc;

NebulaLog::log("ImM",Log::INFO,"Loading Image Manager driver.");

vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
if ( mad_conf.size() > 0 )
{
vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
}

if ( vattr == 0 )
{
@ -95,7 +95,7 @@ int ImageManager::acquire_image(Image *img, string& error)
case Image::USED:
if (img->isPersistent())
{
error = "Cannot aquire persistent image, it is already in use";
error = "Cannot acquire persistent image, it is already in use";
rc = -1;
}
else
@ -106,15 +106,15 @@ int ImageManager::acquire_image(Image *img, string& error)
break;

case Image::DISABLED:
error = "Cannot aquire image, it is disabled";
error = "Cannot acquire image, it is disabled";
rc = -1;
break;
case Image::LOCKED:
error = "Cannot aquire image, it is locked";
error = "Cannot acquire image, it is locked";
rc = -1;
break;
case Image::ERROR:
error = "Cannot aquire image, it is in an error state";
error = "Cannot acquire image, it is in an error state";
rc = -1;
break;
default:

@ -276,13 +276,13 @@ int ImageManager::delete_image(int iid, const string& ds_data)
if ( img->get_running() != 0 )
{
img->unlock();
return -1; //Can not remove images in use
return -1; //Cannot remove images in use
}
break;

case Image::USED:
img->unlock();
return -1; //Can not remove images in use
return -1; //Cannot remove images in use
break;

case Image::INIT:

@ -239,13 +239,13 @@ int Mad::start()

error_exec:
oss.str("");
oss << "Can not load driver " << executable << ", " << strerror(errno);
oss << "Cannot load driver " << executable << ", " << strerror(errno);
NebulaLog::log("MAD", Log::ERROR, oss);
exit(-1);

error_dup2:
oss.str("");
oss << "Can not duplicate descriptors, " << strerror(errno);
oss << "Cannot duplicate descriptors, " << strerror(errno);
NebulaLog::log("MAD", Log::ERROR, oss);
exit(-1);

@ -275,7 +275,7 @@ error_attributes:

error_pipes:
oss.str("");
oss << "Can not create driver pipes, " << strerror(errno);
oss << "Cannot create driver pipes, " << strerror(errno);
NebulaLog::log("MAD", Log::ERROR, oss);
return -1;
}
@ -23,6 +23,7 @@ DATE=date
DD=dd
DU=du
GREP=grep
ISCSIADM=iscsiadm
LVCREATE=lvcreate
LVREMOVE=lvremove
LVS=lvs

@ -244,3 +245,61 @@ EOF`
exit $SSH_EXEC_RC
fi
}

# ------------------------------------------------------------------------------
# iSCSI functions
# ------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# Returns the command to create a new target
# @param $1 - ID of the image
# @param $2 - Target Host
# @param $3 - Device
# @return the command to create a new target
#-------------------------------------------------------------------------------

function tgtadm_target_new {
ID="$1"
IQN="$2"

echo "$TGTADM --lld iscsi --op new --mode target --tid $ID "\
"--targetname $IQN;"
}

function tgtadm_target_bind_all {
ID="$1"
echo "$TGTADM --lld iscsi --op bind --mode target --tid $ID -I ALL"
}

function tgtadm_logicalunit_new {
ID="$1"
DEV="$2"

echo "$TGTADM --lld iscsi --op new --mode logicalunit --tid $ID "\
"--lun 1 --backing-store $DEV"
}

function tgtadm_target_delete {
ID="$1"
echo "$TGTADM --lld iscsi --op delete --mode target --tid $ID"
}

###

function iscsiadm_discovery {
TARGET_HOST="$1"
echo "$ISCSIADM -m discovery -t st -p $TARGET_HOST"
}

function iscsiadm_login {
IQN="$1"
TARGET_HOST="$2"
echo "$ISCSIADM -m node --targetname $IQN -p $TARGET_HOST --login"
}

function iscsiadm_logout {
IQN="$1"
echo "$ISCSIADM -m node --targetname $IQN --logout"
}
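Each helper above only prints a tgtadm/iscsiadm command line; the TM drivers embed that output in a here-document and ship it to the remote host with ssh_exec_and_log. A self-contained sketch of that composition (the two functions are copied from above so the snippet runs on its own; the host and IQN values are placeholders, and the final echo stands in for the remote execution):

#!/bin/bash
# Hedged sketch of how the iscsiadm_* helpers compose into a remote script.
ISCSIADM=iscsiadm
SUDO=sudo

function iscsiadm_discovery {
    TARGET_HOST="$1"
    echo "$ISCSIADM -m discovery -t st -p $TARGET_HOST"
}

function iscsiadm_login {
    IQN="$1"
    TARGET_HOST="$2"
    echo "$ISCSIADM -m node --targetname $IQN -p $TARGET_HOST --login"
}

TARGET_HOST=o200                                          # placeholder host
IQN="iqn.2012-02.org.opennebula:o200.vg-one.lv-one-0"     # placeholder IQN

DISCOVER_CMD=$(cat <<EOF
set -e
$SUDO $(iscsiadm_discovery "$TARGET_HOST")
$SUDO $(iscsiadm_login "$IQN" "$TARGET_HOST")
EOF
)

# The drivers would run: ssh_exec_and_log "$DST_HOST" "$DISCOVER_CMD" "error msg"
echo "$DISCOVER_CMD"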
@ -127,7 +127,7 @@ int main(int argc, char **argv)

if( fd == -1)
{
cerr<< "Error: Can not start oned, opening lock file " << lockfile
cerr<< "Error: Cannot start oned, opening lock file " << lockfile
<< endl;

exit(-1);
@ -186,7 +186,7 @@ int main(int argc, char **argv)
return 0;

error_chdir:
cerr << "Error: can not change to dir " << wd << "\n";
cerr << "Error: cannot change to dir " << wd << "\n";
unlink(lockfile.c_str());
exit(-1);

@ -6,7 +6,7 @@
# Daemon configuration attributes
#-------------------------------------------------------------------------------
# MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions.
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL cannot have smaller values
# than MANAGER_TIMER.
#
# HOST_MONITORING_INTERVAL: Time in seconds between host monitorization.

@ -69,7 +69,7 @@ module OpenNebula
# Creates ACLs for the group. The ACL rules are described in a file
def create_acls(filename = GROUP_DEFAULT)
if !File.readable?(filename)
return -1, "Can not read deafult ACL file for group"
return -1, "Cannot read deafult ACL file for group"
end

msg = String.new
src/onedb/3.3.0_to_3.3.80.rb (new file, 277 lines)
@ -0,0 +1,277 @@
|
||||
# -------------------------------------------------------------------------- *
|
||||
# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may *
|
||||
# not use this file except in compliance with the License. You may obtain *
|
||||
# a copy of the License at *
|
||||
# *
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 *
|
||||
# *
|
||||
# Unless required by applicable law or agreed to in writing, software *
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, *
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
|
||||
# See the License for the specific language governing permissions and *
|
||||
# limitations under the License. *
|
||||
# -------------------------------------------------------------------------- *
|
||||
|
||||
require "rexml/document"
|
||||
include REXML
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"3.3.80"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 3.3.80"
|
||||
end
|
||||
|
||||
def up
|
||||
one_location = ENV["ONE_LOCATION"]
|
||||
|
||||
if !one_location
|
||||
var_location = "/var/lib/one"
|
||||
else
|
||||
var_location = one_location + "/var"
|
||||
end
|
||||
|
||||
########################################################################
|
||||
# Get oneadmin user and group names
|
||||
########################################################################
|
||||
|
||||
oneadmin_uname = nil
|
||||
|
||||
@db.fetch("SELECT name FROM user_pool WHERE oid=0") do |row|
|
||||
oneadmin_uname = row[:name]
|
||||
end
|
||||
|
||||
if oneadmin_uname == nil
|
||||
puts "Error trying to read oneadmin's user name ('SELECT name FROM user_pool WHERE oid=0')"
|
||||
return false
|
||||
end
|
||||
|
||||
oneadmin_gname = nil
|
||||
|
||||
@db.fetch("SELECT name FROM group_pool WHERE oid=0") do |row|
|
||||
oneadmin_gname = row[:name]
|
||||
end
|
||||
|
||||
if oneadmin_gname == nil
|
||||
puts "Error trying to read oneadmin's group name ('SELECT name FROM group_pool WHERE oid=0')"
|
||||
return false
|
||||
end
|
||||
|
||||
########################################################################
|
||||
# Create the cluster and datastore tables
|
||||
########################################################################
|
||||
|
||||
# New table for Clusters
|
||||
@db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
# New table for Datastores
|
||||
@db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
# Insert system datastore
|
||||
|
||||
xml =
|
||||
"<DATASTORE>" <<
|
||||
" <ID>0</ID>" <<
|
||||
" <UID>0</UID>" <<
|
||||
" <GID>0</GID>" <<
|
||||
" <UNAME>#{oneadmin_uname}</UNAME>" <<
|
||||
" <GNAME>#{oneadmin_gname}</GNAME>" <<
|
||||
" <NAME>system</NAME>" <<
|
||||
" <PERMISSIONS>" <<
|
||||
" <OWNER_U>1</OWNER_U>" <<
|
||||
" <OWNER_M>1</OWNER_M>" <<
|
||||
" <OWNER_A>0</OWNER_A>" <<
|
||||
" <GROUP_U>0</GROUP_U>" <<
|
||||
" <GROUP_M>0</GROUP_M>" <<
|
||||
" <GROUP_A>0</GROUP_A>" <<
|
||||
" <OTHER_U>0</OTHER_U>" <<
|
||||
" <OTHER_M>0</OTHER_M>" <<
|
||||
" <OTHER_A>0</OTHER_A>" <<
|
||||
" </PERMISSIONS>" <<
|
||||
" <DS_MAD>fs</DS_MAD>" <<
|
||||
" <TM_MAD>shared</TM_MAD>" <<
|
||||
" <BASE_PATH>#{var_location}/datastores/0</BASE_PATH>" <<
|
||||
" <CLUSTER_ID>-1</CLUSTER_ID>" <<
|
||||
" <CLUSTER>none</CLUSTER>" <<
|
||||
" <IMAGES/>" <<
|
||||
" <TEMPLATE>" <<
|
||||
" <DS_MAD><![CDATA[fs]]></DS_MAD>" <<
|
||||
" <TM_MAD><![CDATA[shared]]></TM_MAD>" <<
|
||||
" </TEMPLATE>" <<
|
||||
"</DATASTORE>"
|
||||
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => 0,
|
||||
:name => 'system',
|
||||
:body => xml,
|
||||
:uid => 0,
|
||||
:gid => 0,
|
||||
:owner_u => 1,
|
||||
:group_u => 0,
|
||||
:other_u => 0)
|
||||
|
||||
# Last oid for cluster_pool and datastore_pool
|
||||
|
||||
@db[:pool_control].insert(
|
||||
:tablename => 'cluster_pool',
|
||||
:last_oid => 99)
|
||||
|
||||
@db[:pool_control].insert(
|
||||
:tablename => 'datastore_pool',
|
||||
:last_oid => 99)
|
||||
|
||||
########################################################################
|
||||
# Add each Host to Cluster -1 (none)
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
|
||||
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_host_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
# Delete TM_MAD elem
|
||||
doc.root.delete_element("TM_MAD")
|
||||
|
||||
# Add Cluster elements
|
||||
doc.root.add_element("CLUSTER_ID").text = "-1"
|
||||
doc.root.add_element("CLUSTER").text = "none"
|
||||
|
||||
@db[:host_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:state => row[:state],
|
||||
:last_mon_time => row[:last_mon_time],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_host_pool;"
|
||||
|
||||
########################################################################
|
||||
# Add each VNet to Cluster -1 (none)
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE network_pool RENAME TO old_network_pool;"
|
||||
@db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_network_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
# Add Cluster elements
|
||||
doc.root.add_element("CLUSTER_ID").text = "-1"
|
||||
doc.root.add_element("CLUSTER").text = "none"
|
||||
|
||||
@db[:network_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_network_pool;"
|
||||
|
||||
########################################################################
|
||||
# Add each Image to Datastore 1 (default)
|
||||
########################################################################
|
||||
|
||||
images_element = "<IMAGES>"
|
||||
|
||||
@db.run "ALTER TABLE image_pool RENAME TO old_image_pool;"
|
||||
@db.run "CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );"
|
||||
|
||||
@db.fetch("SELECT * FROM old_image_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
# Add Cluster elements
|
||||
doc.root.add_element("DATASTORE_ID").text = "1"
|
||||
doc.root.add_element("DATASTORE").text = "default"
|
||||
|
||||
images_element << "<ID>#{row[:oid]}</ID>"
|
||||
|
||||
# Update SOURCE
|
||||
doc.root.each_element("SOURCE") { |e|
|
||||
previous_source = e.text
|
||||
hash = previous_source.split('/')[-1]
|
||||
|
||||
if ( hash.length == 32 && hash =~ /^[0-9A-F]+$/i )
|
||||
e.text = "#{var_location}/datastores/1/#{hash}"
|
||||
|
||||
# TODO: create link, or mv image file?
|
||||
`ln -s #{previous_source} #{e.text}`
|
||||
# `mv #{e.text} #{previous_source}`
|
||||
end
|
||||
}
|
||||
|
||||
@db[:image_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_image_pool;"
|
||||
|
||||
images_element << "</IMAGES>"
|
||||
|
||||
# Insert default datastore
|
||||
|
||||
xml =
|
||||
"<DATASTORE>" <<
|
||||
" <ID>1</ID>" <<
|
||||
" <UID>0</UID>" <<
|
||||
" <GID>0</GID>" <<
|
||||
" <UNAME>#{oneadmin_uname}</UNAME>" <<
|
||||
" <GNAME>#{oneadmin_gname}</GNAME>" <<
|
||||
" <NAME>default</NAME>" <<
|
||||
" <PERMISSIONS>" <<
|
||||
" <OWNER_U>1</OWNER_U>" <<
|
||||
" <OWNER_M>1</OWNER_M>" <<
|
||||
" <OWNER_A>0</OWNER_A>" <<
|
||||
" <GROUP_U>0</GROUP_U>" <<
|
||||
" <GROUP_M>0</GROUP_M>" <<
|
||||
" <GROUP_A>0</GROUP_A>" <<
|
||||
" <OTHER_U>0</OTHER_U>" <<
|
||||
" <OTHER_M>0</OTHER_M>" <<
|
||||
" <OTHER_A>0</OTHER_A>" <<
|
||||
" </PERMISSIONS>" <<
|
||||
" <DS_MAD>fs</DS_MAD>" << # TODO
|
||||
" <TM_MAD>shared</TM_MAD>" << # TODO
|
||||
" <BASE_PATH>#{var_location}/datastores/1</BASE_PATH>" <<
|
||||
" <CLUSTER_ID>-1</CLUSTER_ID>" <<
|
||||
" <CLUSTER>none</CLUSTER>" <<
|
||||
images_element <<
|
||||
" <TEMPLATE>" <<
|
||||
" <DS_MAD><![CDATA[fs]]></DS_MAD>" << # TODO
|
||||
" <TM_MAD><![CDATA[shared]]></TM_MAD>" << # TODO
|
||||
" </TEMPLATE>" <<
|
||||
"</DATASTORE>"
|
||||
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => 1,
|
||||
:name => 'default',
|
||||
:body => xml,
|
||||
:uid => 0,
|
||||
:gid => 0,
|
||||
:owner_u => 1,
|
||||
:group_u => 0,
|
||||
:other_u => 0)
|
||||
|
||||
return true
|
||||
end
|
||||
end
|
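The migrator above creates the cluster_pool and datastore_pool tables and inserts the system (0) and default (1) datastores. A quick post-upgrade sanity check, assuming the default SQLite backend (the database path is an assumption; MySQL installs would use the mysql client instead):

#!/bin/bash
# Hedged sketch: inspect the tables created by the 3.3.0 -> 3.3.80 migrator.
DB=/var/lib/one/one.db        # assumed SQLite location

# datastores 0 (system) and 1 (default) should now exist
sqlite3 "$DB" "SELECT oid, name FROM datastore_pool;"

# both new pools reserve oids below 100 in pool_control
sqlite3 "$DB" "SELECT tablename, last_oid FROM pool_control
               WHERE tablename IN ('cluster_pool','datastore_pool');"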
@ -6,7 +6,7 @@
|
||||
# Daemon configuration attributes
|
||||
#-------------------------------------------------------------------------------
|
||||
# MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions.
|
||||
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values
|
||||
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL cannot have smaller values
|
||||
# than MANAGER_TIMER.
|
||||
#
|
||||
# HOST_MONITORING_INTERVAL: Time in seconds between host monitorization.
|
||||
@ -390,7 +390,7 @@ HM_MAD = [
|
||||
|
||||
#-------------------------------- ebtables Hook---------------------------------
|
||||
# You can use these two hooks to isolate networks at the ethernet level so the
|
||||
# traffic generated in different virtual networks can not be seen in others.
|
||||
# traffic generated in different virtual networks cannot be seen in others.
|
||||
#
|
||||
# All the network configuration will be done in the cluster nodes, these are the
|
||||
# additional requisites:
|
||||
|
@ -6,7 +6,7 @@
|
||||
# Daemon configuration attributes
|
||||
#-------------------------------------------------------------------------------
|
||||
# MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions.
|
||||
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values
|
||||
# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL cannot have smaller values
|
||||
# than MANAGER_TIMER.
|
||||
#
|
||||
# HOST_MONITORING_INTERVAL: Time in seconds between host monitorization.
|
||||
@ -390,7 +390,7 @@ HM_MAD = [
|
||||
|
||||
#-------------------------------- ebtables Hook---------------------------------
|
||||
# You can use these two hooks to isolate networks at the ethernet level so the
|
||||
# traffic generated in different virtual networks can not be seen in others.
|
||||
# traffic generated in different virtual networks cannot be seen in others.
|
||||
#
|
||||
# All the network configuration will be done in the cluster nodes, these are the
|
||||
# additional requisites:
|
||||
|
@ -54,7 +54,7 @@ setup()
start()
{
if [ ! -f "$OZONES_SERVER" ]; then
echo "Can not find $OZONES_SERVER."
echo "Cannot find $OZONES_SERVER."
exit 1
fi
@ -217,7 +217,7 @@ class OzonesServer < CloudServer
|
||||
vdc = OZones::OpenNebulaVdc.new(id)
|
||||
rc = vdc.destroy
|
||||
rescue => e
|
||||
return [404, OZones::Error.new("Error: Can not delete vdc. " \
|
||||
return [404, OZones::Error.new("Error: Cannot delete vdc. " \
|
||||
"Reason: #{e.message}").to_json]
|
||||
end
|
||||
|
||||
@ -237,7 +237,7 @@ class OzonesServer < CloudServer
|
||||
rc = zone.destroy
|
||||
else
|
||||
return [404,
|
||||
OZones::Error.new("Error: Can not delete " \
|
||||
OZones::Error.new("Error: Cannot delete " \
|
||||
"zone. Reason: zone #{id} not found").to_json]
|
||||
end
|
||||
|
||||
|
@ -172,7 +172,7 @@ int PoolObjectSQL::replace_template(const string& tmpl_str, string& error)

if ( new_tmpl == 0 )
{
error = "Can not allocate a new template";
error = "Cannot allocate a new template";
return -1;
}
@ -114,7 +114,7 @@ int RequestManager::setup_socket()
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
oss << "Can not open server socket: " << strerror(errno);
|
||||
oss << "Cannot open server socket: " << strerror(errno);
|
||||
NebulaLog::log("ReM",Log::ERROR,oss);
|
||||
|
||||
return -1;
|
||||
@ -126,7 +126,7 @@ int RequestManager::setup_socket()
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
oss << "Can not set socket options: " << strerror(errno);
|
||||
oss << "Cannot set socket options: " << strerror(errno);
|
||||
NebulaLog::log("ReM",Log::ERROR,oss);
|
||||
|
||||
close(socket_fd);
|
||||
@ -146,7 +146,7 @@ int RequestManager::setup_socket()
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
oss << "Can not bind to port " << port << " : " << strerror(errno);
|
||||
oss << "Cannot bind to port " << port << " : " << strerror(errno);
|
||||
NebulaLog::log("ReM",Log::ERROR,oss);
|
||||
|
||||
close(socket_fd);
|
||||
|
@ -91,7 +91,7 @@ void RequestManagerDelete::request_execute(xmlrpc_c::paramList const& paramList,
|
||||
if ( rc != 0 )
|
||||
{
|
||||
failure_response(INTERNAL,
|
||||
request_error("Can not delete "+object_name(auth_object),error_msg),
|
||||
request_error("Cannot delete "+object_name(auth_object),error_msg),
|
||||
att);
|
||||
return;
|
||||
}
|
||||
@ -163,7 +163,7 @@ int ImageDelete::drop(int oid, PoolObjectSQL * object, string& error_msg)
|
||||
|
||||
if ( ds == 0 )
|
||||
{
|
||||
error_msg = "Datastore no longer exists can not remove image";
|
||||
error_msg = "Datastore no longer exists cannot remove image";
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -199,7 +199,7 @@ int UserDelete::drop(int oid, PoolObjectSQL * object, string& error_msg)
|
||||
|
||||
if (oid == 0)
|
||||
{
|
||||
error_msg = "oneadmin can not be deleted.";
|
||||
error_msg = "oneadmin cannot be deleted.";
|
||||
|
||||
object->unlock();
|
||||
return -1;
|
||||
|
@ -54,7 +54,7 @@ void RequestManagerUpdateTemplate::request_execute(
|
||||
if ( rc != 0 )
|
||||
{
|
||||
failure_response(INTERNAL,
|
||||
request_error("Can not update template",error_str),
|
||||
request_error("Cannot update template",error_str),
|
||||
att);
|
||||
object->unlock();
|
||||
|
||||
|
@ -160,7 +160,7 @@ int RequestManagerVirtualMachine::add_history(VirtualMachine * vm,
|
||||
if ( rc != 0 )
|
||||
{
|
||||
failure_response(INTERNAL,
|
||||
request_error("Can not update virtual machine history",""),
|
||||
request_error("Cannot update virtual machine history",""),
|
||||
att);
|
||||
|
||||
return -1;
|
||||
@ -442,7 +442,7 @@ void VirtualMachineSaveDisk::request_execute(xmlrpc_c::paramList const& paramLis
|
||||
if ( iid_orig == -1 )
|
||||
{
|
||||
failure_response(INTERNAL,
|
||||
request_error("Can not used selected DISK", error_str),
|
||||
request_error("Cannot use selected DISK", error_str),
|
||||
att);
|
||||
return;
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ setup()
start()
{
if [ ! -f "$SUNSTONE_SERVER" ]; then
echo "Can not find $SUNSTONE_SERVER."
echo "Cannot find $SUNSTONE_SERVER."
exit 1
fi
@ -1288,14 +1288,17 @@ void TransferManager::load_mads(int uid)
|
||||
int rc;
|
||||
string name;
|
||||
|
||||
const VectorAttribute * vattr;
|
||||
const VectorAttribute * vattr = 0;
|
||||
TransferManagerDriver * tm_driver = 0;
|
||||
|
||||
oss << "Loading Transfer Manager driver.";
|
||||
|
||||
NebulaLog::log("TM",Log::INFO,oss);
|
||||
|
||||
vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
|
||||
if ( mad_conf.size() > 0 )
|
||||
{
|
||||
vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
|
||||
}
|
||||
|
||||
if ( vattr == 0 )
|
||||
{
|
||||
|
src/tm_mad/iscsi/clone (new executable file, 68 lines)
@ -0,0 +1,68 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# clone fe:SOURCE host:remote_system_ds/disk.i size
|
||||
# - fe is the front-end hostname
|
||||
# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk
|
||||
# - host is the target host to deploy the VM
|
||||
# - remote_system_ds is the path for the system datastore in the host
|
||||
|
||||
SRC=$1 # iqn.2012-02.org.opennebula:o200.vg-one.lv-one-0
|
||||
DST=$2 # o202:/var/lib/one//datastores/0/0/disk.0
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
|
||||
else
|
||||
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
|
||||
fi
|
||||
|
||||
. $TMCOMMON
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Set dst path and dir
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
TARGET=`arg_path $SRC` # o200.vg-one.lv-one-0
|
||||
DST_PATH=`arg_path $DST` # /var/lib/one/datastores/0/0/disk.0
|
||||
DST_HOST=`arg_host $DST` # o202
|
||||
DST_DIR=`dirname $DST_PATH` # /var/lib/one/datastores/0/0
|
||||
|
||||
BASE_IQN=`echo $SRC|$CUT -d: -f1`
|
||||
TARGET=`echo $SRC|$CUT -d: -f2`
|
||||
LV_NAME=`echo $TARGET|$AWK -F. '{print $(NF)}'`
|
||||
VG_NAME=`echo $TARGET|$AWK -F. '{print $(NF-1)}'`
|
||||
DEV="/dev/$VG_NAME/$LV_NAME"
|
||||
|
||||
exit
|
||||
ssh_make_path $DST_HOST $DST_DIR
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Copy files to the remote host
|
||||
#-------------------------------------------------------------------------------
|
||||
case $SRC in
|
||||
http://*)
|
||||
log "Downloading $SRC"
|
||||
RMT_CMD="$WGET -O $DST_PATH $SRC"
|
||||
ssh_exec_and_log "$DST_HOST" "$RMT_CMD" "Error downloading $SRC"
|
||||
;;
|
||||
|
||||
*)
|
||||
log "Cloning $SRC in $DST_PATH"
|
||||
exec_and_log "$SCP $SRC $DST" "Error copying $SRC to $DST"
|
||||
;;
|
||||
esac
|
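Both clone and ln recover the target host, volume group and logical volume by splitting the source IQN on ':' and '.'; a standalone sketch of that parsing with the sample value from the script comments (the trailing echo is only illustrative):

#!/bin/bash
# Hedged sketch of the IQN parsing used by the iscsi clone/ln scripts.
SRC="iqn.2012-02.org.opennebula:o200.vg-one.lv-one-0"     # sample from the comments above

BASE_IQN=$(echo "$SRC" | cut -d: -f1)                  # iqn.2012-02.org.opennebula
TARGET=$(echo "$SRC" | cut -d: -f2)                    # o200.vg-one.lv-one-0
LV_NAME=$(echo "$TARGET" | awk -F. '{print $(NF)}')    # lv-one-0
VG_NAME=$(echo "$TARGET" | awk -F. '{print $(NF-1)}')  # vg-one
DEV="/dev/$VG_NAME/$LV_NAME"                           # /dev/vg-one/lv-one-0
TARGET_HOST=${TARGET%.$VG_NAME.$LV_NAME}               # o200

echo "$TARGET_HOST exports $DEV as $BASE_IQN:$TARGET"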
src/tm_mad/iscsi/delete (new executable file, 59 lines)
@ -0,0 +1,59 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# DELETE <host:remote_system_ds/disk.i|host:remote_system_ds/>
|
||||
# - host is the target host to deploy the VM
|
||||
# - remote_system_ds is the path for the system datastore in the host
|
||||
|
||||
DST=$1 # o202:/var/lib/one//datastores/0/0/disk.0
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
|
||||
else
|
||||
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
|
||||
fi
|
||||
|
||||
. $TMCOMMON
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Return if deleting a disk, we will delete them when removing the
|
||||
# remote_system_ds directory for the VM (remotely)
|
||||
#-------------------------------------------------------------------------------
|
||||
DST_PATH=`arg_path $DST`
|
||||
DST_HOST=`arg_host $DST`
|
||||
|
||||
if [ `is_disk $DST_PATH` -eq 1 ]; then
|
||||
# Disk
|
||||
LOGOUT_CMD=$(cat <<EOF
|
||||
set -e
|
||||
IQN=\$(readlink $DST_PATH |grep -o 'iqn.*$')
|
||||
IQN=\${IQN%-lun-1}
|
||||
$SUDO $(iscsiadm_logout "\$IQN")
|
||||
EOF
|
||||
)
|
||||
ssh_exec_and_log "$DST_HOST" "$LOGOUT_CMD" \
|
||||
"Error logging out $IQN"
|
||||
else
|
||||
# Directory
|
||||
log "Deleting $DST_PATH"
|
||||
ssh_exec_and_log "$DST_HOST" "rm -rf $DST_PATH" "Error deleting $DST_PATH"
|
||||
fi
|
||||
|
||||
ssh_exec_and_log "$DST_HOST" "$DISCOVER_CMD" \
|
||||
"Error registering $DST_HOST:$DEV"
|
||||
exit 0
|
src/tm_mad/iscsi/ln (new executable file, 80 lines)
@ -0,0 +1,80 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# clone fe:SOURCE host:remote_system_ds/disk.i size
|
||||
# - fe is the front-end hostname
|
||||
# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk
|
||||
# - host is the target host to deploy the VM
|
||||
# - remote_system_ds is the path for the system datastore in the host
|
||||
|
||||
SRC=$1 # iqn.2012-02.org.opennebula:o200.vg-one.lv-one-0
|
||||
DST=$2 # o202:/var/lib/one//datastores/0/0/disk.0
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
|
||||
else
|
||||
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
|
||||
fi
|
||||
|
||||
. $TMCOMMON
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Set dst path and dir
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
IQN=$SRC
|
||||
TARGET=`arg_path $SRC` # o200.vg-one.lv-one-0
|
||||
DST_PATH=`arg_path $DST` # /var/lib/one/datastores/0/0/disk.0
|
||||
DST_HOST=`arg_host $DST` # o202
|
||||
DST_DIR=`dirname $DST_PATH` # /var/lib/one/datastores/0/0
|
||||
|
||||
BASE_IQN=`echo $IQN|$CUT -d: -f1`
|
||||
TARGET=`echo $IQN|$CUT -d: -f2`
|
||||
LV_NAME=`echo $TARGET|$AWK -F. '{print $(NF)}'`
|
||||
VG_NAME=`echo $TARGET|$AWK -F. '{print $(NF-1)}'`
|
||||
TARGET_DEV="/dev/$VG_NAME/$LV_NAME"
|
||||
TARGET_HOST=${TARGET%.$VG_NAME.$LV_NAME}
|
||||
|
||||
DISCOVER_CMD=$(cat <<EOF
|
||||
set -e
|
||||
mkdir -p $DST_DIR
|
||||
$SUDO $(iscsiadm_discovery "$TARGET_HOST")
|
||||
$SUDO $(iscsiadm_login "$IQN" "$TARGET_HOST")
|
||||
sleep 1
|
||||
DISK_BY_PATH=\$(ls /dev/disk/by-path/*$IQN-lun-1)
|
||||
ln -s "\$DISK_BY_PATH" "$DST_PATH"
|
||||
EOF
|
||||
)
|
||||
|
||||
ssh_exec_and_log "$DST_HOST" "$DISCOVER_CMD" \
|
||||
"Error registering $DST_HOST:$DEV"
|
||||
exit 0
|
||||
|
||||
|
||||
# case $SRC in
|
||||
# http://*)
|
||||
# log "Downloading $SRC"
|
||||
# RMT_CMD="$WGET -O $DST_PATH $SRC"
|
||||
# ssh_exec_and_log "$DST_HOST" "$RMT_CMD" "Error downloading $SRC"
|
||||
# ;;
|
||||
|
||||
# *)
|
||||
# log "Cloning $SRC in $DST_PATH"
|
||||
# exec_and_log "$SCP $SRC $DST" "Error copying $SRC to $DST"
|
||||
# ;;
|
||||
# esac
|
src/tm_mad/iscsi/mv (new executable file, 74 lines)
@ -0,0 +1,74 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# MV <hostA:system_ds/disk.i|hostB:system_ds/disk.i>
|
||||
# <hostA:system_ds/|hostB:system_ds/>
|
||||
# - hostX is the target host to deploy the VM
|
||||
# - system_ds is the path for the system datastore in the host
|
||||
|
||||
SRC=$1
|
||||
DST=$2
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
|
||||
else
|
||||
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
|
||||
fi
|
||||
|
||||
. $TMCOMMON
|
||||
|
||||
# {:path=>
|
||||
# "/var/lib/one/remotes/tm/iscsi/mv o202:/var/lib/one//datastores/0/3/disk.0 rama:/var/lib/one/datastores/0/3/disk.0",
|
||||
# :result=>"SUCCESS",
|
||||
# :info=>"-"}
|
||||
|
||||
# {:path=>
|
||||
# "/var/lib/one/remotes/tm/shared/mv o202:/var/lib/one//datastores/0/3 rama:/var/lib/one/datastores/0/3",
|
||||
# :result=>"SUCCESS",
|
||||
# :info=>"-"}
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Return if moving a disk, we will move them when moving the whole system_ds
|
||||
# directory for the VM
|
||||
#-------------------------------------------------------------------------------
|
||||
SRC_PATH=`arg_path $SRC`
|
||||
SRC_HOST=`arg_host $SRC`
|
||||
|
||||
if [ `is_disk $SRC_PATH` -eq 0 ]; then
|
||||
log "Removing directory"
|
||||
ssh_exec_and_log "$SRC_HOST" "rm -rf $SRC_PATH"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$SRC" == "$DST" ]; then
|
||||
log "Not moving $SRC to $DST, they are the same path"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
log "Logging out $IQN"
|
||||
|
||||
LOGOUT_CMD=$(cat <<EOF
|
||||
set -e
|
||||
$SUDO $(iscsiadm_logout "$IQN")
|
||||
EOF
|
||||
)
|
||||
ssh_exec_and_log "$SRC_HOST" "$LOGOUT_CMD" \
|
||||
"Error logging out $IQN"
|
||||
|
||||
exit 0
|
src/tm_mad/iscsi/mvds (new executable file, 50 lines)
@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# mvds host:remote_system_ds/disk.i fe:SOURCE
|
||||
# - fe is the front-end hostname
|
||||
# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk
|
||||
# - host is the target host to deploy the VM
|
||||
# - remote_system_ds is the path for the system datastore in the host
|
||||
|
||||
SRC=$1 # o202:/var/lib/one//datastores/0/1/disk.0
|
||||
DST=$2 # iqn.2012-02.org.opennebula:o200.vg-one.lv-one-0
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
|
||||
else
|
||||
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
|
||||
fi
|
||||
|
||||
. $TMCOMMON
|
||||
|
||||
SRC_HOST=`arg_host $SRC` # o202
|
||||
IQN=$DST
|
||||
|
||||
log "Logging out $IQN"
|
||||
|
||||
LOGOUT_CMD=$(cat <<EOF
|
||||
set -e
|
||||
$SUDO $(iscsiadm_logout "$IQN")
|
||||
EOF
|
||||
)
|
||||
|
||||
ssh_exec_and_log "$SRC_HOST" "$LOGOUT_CMD" \
|
||||
"Error logging out $IQN"
|
||||
|
||||
exit 0
|
@ -24,7 +24,7 @@ function fix_iso {
if [ $? -eq 0 ]; then
bname=`basename $dst_path`
exec_and_log "ln -s $bname $dst_path/$bname.iso" \
"Can not link ISO file."
"Cannot link ISO file."
fi
fi
}
@ -183,7 +183,7 @@ int VirtualMachine::select(SqlDB * db)
|
||||
return 0;
|
||||
|
||||
error_previous_history:
|
||||
ose << "Can not get previous history record (seq:" << history->seq
|
||||
ose << "Cannot get previous history record (seq:" << history->seq
|
||||
<< ") for VM id: " << oid;
|
||||
|
||||
log("ONE", Log::ERROR, ose);
|
||||
@ -380,7 +380,7 @@ int VirtualMachine::parse_context(string& error_str)
|
||||
|
||||
if (str == 0)
|
||||
{
|
||||
NebulaLog::log("ONE",Log::ERROR, "Can not marshall CONTEXT");
|
||||
NebulaLog::log("ONE",Log::ERROR, "Cannot marshall CONTEXT");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -535,9 +535,7 @@ int VirtualMachine::automatic_requirements(string& error_str)
|
||||
ostringstream oss;
|
||||
string requirements;
|
||||
string cluster_id;
|
||||
bool error = false;
|
||||
|
||||
oss << "Incompatible cluster IDs.";
|
||||
string vatt_cluster_id;
|
||||
|
||||
// Get cluster id from all DISK vector attributes
|
||||
|
||||
@ -552,23 +550,16 @@ int VirtualMachine::automatic_requirements(string& error_str)
|
||||
continue;
|
||||
}
|
||||
|
||||
string vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
|
||||
vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
|
||||
|
||||
if ( !vatt_cluster_id.empty() )
|
||||
{
|
||||
oss << endl << "DISK [" << i << "]: IMAGE ["
|
||||
<< vatt->vector_value("IMAGE_ID") << "] from DATASTORE ["
|
||||
<< vatt->vector_value("DATASTORE_ID") << "] requires CLUSTER ["
|
||||
<< vatt_cluster_id << "]";
|
||||
if ( cluster_id != vatt_cluster_id )
|
||||
{
|
||||
goto error;
|
||||
}
|
||||
|
||||
if ( cluster_id.empty() )
|
||||
{
|
||||
cluster_id = vatt_cluster_id;
|
||||
}
|
||||
else if ( cluster_id != vatt_cluster_id )
|
||||
{
|
||||
error = true;
|
||||
}
|
||||
cluster_id = vatt_cluster_id;
|
||||
}
|
||||
}
|
||||
|
||||
@ -586,32 +577,19 @@ int VirtualMachine::automatic_requirements(string& error_str)
|
||||
continue;
|
||||
}
|
||||
|
||||
string vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
|
||||
vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
|
||||
|
||||
if ( !vatt_cluster_id.empty() )
|
||||
{
|
||||
oss << endl << "NIC [" << i << "]: NETWORK ["
|
||||
<< vatt->vector_value("NETWORK_ID") << "] requires CLUSTER ["
|
||||
<< vatt_cluster_id << "]";
|
||||
if ( cluster_id != vatt_cluster_id )
|
||||
{
|
||||
goto error;
|
||||
}
|
||||
|
||||
if ( cluster_id.empty() )
|
||||
{
|
||||
cluster_id = vatt_cluster_id;
|
||||
}
|
||||
else if ( cluster_id != vatt_cluster_id )
|
||||
{
|
||||
error = true;
|
||||
}
|
||||
cluster_id = vatt_cluster_id;
|
||||
}
|
||||
}
|
||||
|
||||
if ( error == true )
|
||||
{
|
||||
error_str = oss.str();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ( !cluster_id.empty() )
|
||||
{
|
||||
oss.str("");
|
||||
@ -628,6 +606,63 @@ int VirtualMachine::automatic_requirements(string& error_str)
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
|
||||
oss << "Incompatible cluster IDs.";
|
||||
|
||||
// Get cluster id from all DISK vector attributes
|
||||
|
||||
v_attributes.clear();
|
||||
num_vatts = obj_template->get("DISK",v_attributes);
|
||||
|
||||
for(int i=0; i<num_vatts; i++)
|
||||
{
|
||||
vatt = dynamic_cast<VectorAttribute * >(v_attributes[i]);
|
||||
|
||||
if ( vatt == 0 )
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
|
||||
|
||||
if ( !vatt_cluster_id.empty() )
|
||||
{
|
||||
oss << endl << "DISK [" << i << "]: IMAGE ["
|
||||
<< vatt->vector_value("IMAGE_ID") << "] from DATASTORE ["
|
||||
<< vatt->vector_value("DATASTORE_ID") << "] requires CLUSTER ["
|
||||
<< vatt_cluster_id << "]";
|
||||
}
|
||||
}
|
||||
|
||||
// Get cluster id from all NIC vector attributes
|
||||
|
||||
v_attributes.clear();
|
||||
num_vatts = obj_template->get("NIC",v_attributes);
|
||||
|
||||
for(int i=0; i<num_vatts; i++)
|
||||
{
|
||||
vatt = dynamic_cast<VectorAttribute * >(v_attributes[i]);
|
||||
|
||||
if ( vatt == 0 )
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
vatt_cluster_id = vatt->vector_value("CLUSTER_ID");
|
||||
|
||||
if ( !vatt_cluster_id.empty() )
|
||||
{
|
||||
oss << endl << "NIC [" << i << "]: NETWORK ["
|
||||
<< vatt->vector_value("NETWORK_ID") << "] requires CLUSTER ["
|
||||
<< vatt_cluster_id << "]";
|
||||
}
|
||||
}
|
||||
|
||||
error_str = oss.str();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
@ -920,15 +955,15 @@ int VirtualMachine::get_disk_images(string& error_str)
|
||||
return 0;
|
||||
|
||||
error_max_os:
|
||||
error_str = "VM can not use more than one OS image.";
|
||||
error_str = "VM cannot use more than one OS image.";
|
||||
goto error_common;
|
||||
|
||||
error_max_cd:
|
||||
error_str = "VM can not use more than one CDROM image.";
|
||||
error_str = "VM cannot use more than one CDROM image.";
|
||||
goto error_common;
|
||||
|
||||
error_max_db:
|
||||
error_str = "VM can not use more than 10 DATABLOCK images.";
|
||||
error_str = "VM cannot use more than 10 DATABLOCK images.";
|
||||
goto error_common;
|
||||
|
||||
error_common:
|
||||
|
@ -170,7 +170,7 @@ VirtualMachinePool::VirtualMachinePool(SqlDB * db,
{
ostringstream oss;

oss << "Unkown VM_HOOK " << on << ". Hook not registered!";
oss << "Unknown VM_HOOK " << on << ". Hook not registered!";
NebulaLog::log("VM",Log::WARNING,oss);
}
}

@ -185,7 +185,7 @@ class EC2Driver < VirtualMachineDriver
return unless ec2_info

if !ec2_value(ec2_info, 'AMI')
msg = "Can not find AMI in deployment file"
msg = "Cannot find AMI in deployment file"
send_message(ACTION[:deploy], RESULT[:failure], id, msg)
return
end
@ -294,7 +294,7 @@ private

if !local_dfile
send_message(ACTION[:deploy],RESULT[:failure],id,
"Can not open deployment file #{local_dfile}")
"Cannot open deployment file #{local_dfile}")
return
end

@ -322,7 +322,7 @@ private
ec2 = all_ec2_elements[0]
else
send_message(ACTION[:deploy],RESULT[:failure],id,
"Can not find EC2 element in deployment file "<<
"Cannot find EC2 element in deployment file "<<
"#{local_dfile} or couldn't find any EC2 site matching "<<
"one of the template.")
return

@ -259,7 +259,7 @@ class ExecDriver < VirtualMachineDriver

if !local_dfile || File.zero?(local_dfile)
send_message(ACTION[:deploy],RESULT[:failure],id,
"Can not open deployment file #{local_dfile}")
"Cannot open deployment file #{local_dfile}")
return
end

@ -167,7 +167,7 @@ class VMwareDriver
# Define the VM
dfile = File.dirname(File.dirname(checkpoint)) + "/deployment.0"
rescue => e
OpenNebula.log_error("Can not open checkpoint #{e.message}")
OpenNebula.log_error("Cannot open checkpoint #{e.message}")
exit -1
end