Mirror of https://github.com/OpenNebula/one.git (synced 2025-03-16 22:50:10 +03:00)

Commit 0128fbd068: Merge branch 'master' into foundation5

Conflicts:
    src/sunstone/public/js/plugins/templates-tab.js
    src/sunstone/public/js/sunstone-util.js
@ -38,16 +38,13 @@ class AclManager : public Callbackable, public ActionListener
public:

    /**
     *
     *   @param _db pointer to the DB
     *   @param zone_id of the Zone
     *   @param is_federation_enabled true if this oned is part of a federation
     *   @param is_federation_slave true if this oned is a federation slave. It
     *   it is true, it will reload periodically rules from the DB
     *   @param is_federation_slave true if this oned is a federation slave. If
     *   it is true, it will reload periodically rules from the DB
     *   @param timer_period period to reload the rules
     */
    AclManager(SqlDB * _db, int zone_id, bool is_federation_enabled,
               bool is_federation_slave, time_t timer);
    AclManager(SqlDB * _db, int zone_id, bool is_federation_slave, time_t timer);

    virtual ~AclManager();

@ -145,6 +142,13 @@ public:
     */
    void del_cid_rules(int cid);

    /**
     *  Deletes rules that apply to this zone id
     *
     *    @param zid The zone id
     */
    void del_zid_rules(int zid);

    /**
     *  Deletes all rules that apply to this resource
     *
@ -214,8 +218,7 @@ protected:
     *    from DB)
     */
    AclManager(int _zone_id)
        :zone_id(_zone_id), db(0), lastOID(0), is_federation_enabled(false),
         is_federation_slave(false)
        :zone_id(_zone_id), db(0), lastOID(0), is_federation_slave(false)
    {
        pthread_mutex_init(&mutex, 0);
    };
@ -312,6 +315,13 @@ private:
                           long long resource_req,
                           long long resource_mask);

    /**
     *  Deletes all rules that match the zone mask
     *
     *    @param zone_req Mask to match
     */
    void del_zone_matching_rules(long long zone_req);

    // ----------------------------------------
    // Local zone
    // ----------------------------------------
@ -420,11 +430,6 @@ private:
    // Refresh loop thread
    // ----------------------------------------

    /**
     *  Flag to know if this oned is part of a federation
     */
    bool is_federation_enabled;

    /**
     *  Flag to refresh the cache periodically
     */
@ -170,6 +170,19 @@ public:
        return vnets.get_collection_copy();
    }

    /**
     *  Get the default reserved capacity for hosts in the cluster. It can be
     *  overridden if defined in the host template.
     *    @param cpu reserved cpu (in percentage)
     *    @param mem reserved mem (in KB)
     */
    void get_reserved_capacity(long long &cpu, long long& mem)
    {
        get_template_attribute("RESERVED_CPU", cpu);

        get_template_attribute("RESERVED_MEM", mem);
    }

    // *************************************************************************
    // DataBase implementation (Public)
    // *************************************************************************
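The host-side counterpart later in this commit only overwrites these cluster defaults when the host template defines its own RESERVED_CPU / RESERVED_MEM, and Host::update_info then subtracts the result from the monitored totals. Below is an illustrative Ruby sketch of that arithmetic, not OpenNebula code; the attribute names come from the diff, the sample numbers are invented (OpenNebula counts CPU in percent, 100 per core):

    # Reserved capacity resolution: the host value wins, otherwise the cluster
    # default applies, otherwise nothing is reserved.
    def reserved(host_tmpl, cluster_tmpl, key)
      host_tmpl[key] || cluster_tmpl[key] || 0
    end

    cluster = { "RESERVED_CPU" => 100, "RESERVED_MEM" => 1024 * 1024 }  # 1 core, 1 GB (KB)
    host    = { "RESERVED_CPU" => 200 }                                 # overrides CPU only

    monitored = { "TOTALCPU" => 800, "TOTALMEMORY" => 8 * 1024 * 1024 }

    max_cpu = monitored["TOTALCPU"]    - reserved(host, cluster, "RESERVED_CPU")
    max_mem = monitored["TOTALMEMORY"] - reserved(host, cluster, "RESERVED_MEM")

    puts "usable cpu: #{max_cpu}, usable mem: #{max_mem} KB"  # => 600, 7340032 KB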
@ -21,6 +21,7 @@
|
||||
#include "ObjectCollection.h"
|
||||
#include "User.h"
|
||||
#include "QuotasSQL.h"
|
||||
#include "Template.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
@ -109,6 +110,14 @@ public:
|
||||
return quota.update(oid, db);
|
||||
};
|
||||
|
||||
/**
|
||||
* Factory method for Group templates
|
||||
*/
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new Template;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
@ -128,9 +137,14 @@ private:
|
||||
{
|
||||
// Allow users in this group to see it
|
||||
group_u = 1;
|
||||
|
||||
obj_template = new Template;
|
||||
};
|
||||
|
||||
virtual ~Group(){};
|
||||
virtual ~Group()
|
||||
{
|
||||
delete obj_template;
|
||||
};
|
||||
|
||||
// *************************************************************************
|
||||
// Attributes (Private)
|
||||
|
@ -157,13 +157,17 @@ public:
|
||||
* @param with_vm_info if monitoring contains VM information
|
||||
* @param lost set of VMs that should be in the host and were not found
|
||||
* @param found VMs running in the host (as expected) and info.
|
||||
* @param reserved_cpu from cluster defaults
|
||||
* @param reserved_mem from cluster defaults
|
||||
* @return 0 on success
|
||||
**/
|
||||
int update_info(Template &tmpl,
|
||||
bool &with_vm_info,
|
||||
set<int> &lost,
|
||||
map<int,string> &found,
|
||||
const set<int> &non_shared_ds);
|
||||
const set<int> &non_shared_ds,
|
||||
long long reserved_cpu,
|
||||
long long reserved_mem);
|
||||
/**
|
||||
* Extracts the DS attributes from the given template
|
||||
* @param parse_str string with values to be parsed
|
||||
@ -268,11 +272,33 @@ public:
|
||||
return last_monitored;
|
||||
};
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
/**
|
||||
* Get the reserved capacity for this host. Parameters will be only updated
|
||||
* if values are defined in the host. Reserved capacity will be subtracted
|
||||
* from the Host total capacity.
|
||||
* @param cpu reserved cpu (in percentage)
|
||||
* @param mem reserved mem (in KB)
|
||||
*/
|
||||
void get_reserved_capacity(long long &cpu, long long& mem)
|
||||
{
|
||||
long long tcpu;
|
||||
long long tmem;
|
||||
|
||||
if (get_template_attribute("RESERVED_CPU", tcpu))
|
||||
{
|
||||
cpu = tcpu;
|
||||
}
|
||||
|
||||
if (get_template_attribute("RESERVED_MEM", tmem))
|
||||
{
|
||||
mem = tmem;
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Share functions. Returns the value associated with each host share
|
||||
// metric
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
long long get_share_running_vms()
|
||||
{
|
||||
return host_share.running_vms;
|
||||
|
@ -43,6 +43,7 @@ public:
|
||||
SqlDB * db,
|
||||
const string& __default_type,
|
||||
const string& __default_dev_prefix,
|
||||
const string& __default_cdrom_dev_prefix,
|
||||
vector<const Attribute *>& restricted_attrs,
|
||||
vector<const Attribute *> hook_mads,
|
||||
const string& remotes_location,
|
||||
|
@ -207,7 +207,7 @@ private:
|
||||
|
||||
void attach_success_action(int vid);
|
||||
|
||||
void attach_failure_action(int vid);
|
||||
void attach_failure_action(int vid, bool release_save_as);
|
||||
|
||||
void detach_success_action(int vid);
|
||||
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <pthread.h>
|
||||
|
||||
class HostPool;
|
||||
class ClusterPool;
|
||||
class DatastorePool;
|
||||
class LifeCycleManager;
|
||||
|
||||
@ -53,6 +54,8 @@ private:
|
||||
// Pointers shared by all the MonitorThreads, init by MonitorThreadPool
|
||||
static HostPool * hpool;
|
||||
|
||||
static ClusterPool *cpool;
|
||||
|
||||
static DatastorePool * dspool;
|
||||
|
||||
static LifeCycleManager *lcm;
|
||||
|
@ -363,10 +363,32 @@ public:
|
||||
*/
|
||||
static string version()
|
||||
{
|
||||
return "OpenNebula 4.5.0";
|
||||
return "OpenNebula " + code_version();
|
||||
};
|
||||
|
||||
static string db_version()
|
||||
/**
|
||||
* Returns the version of oned
|
||||
* @return
|
||||
*/
|
||||
static string code_version()
|
||||
{
|
||||
return "4.5.80"; // bump version
|
||||
}
|
||||
|
||||
/**
|
||||
* Version needed for the DB, shared tables
|
||||
* @return
|
||||
*/
|
||||
static string shared_db_version()
|
||||
{
|
||||
return "4.5.0";
|
||||
}
|
||||
|
||||
/**
|
||||
* Version needed for the DB, local tables
|
||||
* @return
|
||||
*/
|
||||
static string local_db_version()
|
||||
{
|
||||
return "4.5.0";
|
||||
}
|
||||
|
@ -310,6 +310,8 @@ public:
|
||||
};
|
||||
|
||||
~ZoneDelete(){};
|
||||
|
||||
int drop(int oid, PoolObjectSQL * object, string& error_msg);
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -104,9 +104,6 @@ protected:
|
||||
|
||||
virtual int edit_resource_provider(
|
||||
Group* group, int zone_id, int cluster_id, string& error_msg) = 0;
|
||||
|
||||
virtual int edit_acl_rules(
|
||||
int group_id, int zone_id, int cluster_id, string& error_msg) = 0;
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -125,9 +122,6 @@ public:
|
||||
|
||||
int edit_resource_provider(
|
||||
Group* group, int zone_id, int cluster_id, string& error_msg);
|
||||
|
||||
int edit_acl_rules(
|
||||
int group_id, int zone_id, int cluster_id, string& error_msg);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -146,9 +140,6 @@ public:
|
||||
|
||||
int edit_resource_provider(
|
||||
Group* group, int zone_id, int cluster_id, string& error_msg);
|
||||
|
||||
int edit_acl_rules(
|
||||
int group_id, int zone_id, int cluster_id, string& error_msg);
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -239,6 +239,24 @@ public:
|
||||
~ZoneUpdateTemplate(){};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class GroupUpdateTemplate : public RequestManagerUpdateTemplate
|
||||
{
|
||||
public:
|
||||
GroupUpdateTemplate():
|
||||
RequestManagerUpdateTemplate("GroupUpdateTemplate",
|
||||
"Updates a Group template")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_gpool();
|
||||
auth_object = PoolObjectSQL::GROUP;
|
||||
};
|
||||
|
||||
~GroupUpdateTemplate(){};
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -80,18 +80,18 @@ private:
|
||||
static const char * pc_table;
|
||||
|
||||
// DB versioning table
|
||||
static const char * ver_names;
|
||||
static const char * shared_ver_names;
|
||||
|
||||
static const char * ver_bootstrap;
|
||||
static const char * shared_ver_bootstrap;
|
||||
|
||||
static const char * ver_table;
|
||||
static const char * shared_ver_table;
|
||||
|
||||
// DB slave versioning table
|
||||
static const char * slave_ver_names;
|
||||
static const char * local_ver_names;
|
||||
|
||||
static const char * slave_ver_bootstrap;
|
||||
static const char * local_ver_bootstrap;
|
||||
|
||||
static const char * slave_ver_table;
|
||||
static const char * local_ver_table;
|
||||
|
||||
// System attributes table
|
||||
static const char * sys_names;
|
||||
@ -115,18 +115,18 @@ private:
|
||||
bool replace,
|
||||
string& error_str);
|
||||
/**
|
||||
* Bootstraps the database control tables
|
||||
* Bootstraps the database control tables for shared tables
|
||||
*
|
||||
* @return 0 on success
|
||||
*/
|
||||
int bootstrap();
|
||||
int shared_bootstrap();
|
||||
|
||||
/**
|
||||
* Bootstraps the database control tables for a slave DB
|
||||
* Bootstraps the database control tables for local tables
|
||||
*
|
||||
* @return 0 on success
|
||||
*/
|
||||
int slave_bootstrap();
|
||||
int local_bootstrap();
|
||||
|
||||
/**
|
||||
* Callback function for the check_db_version method. Stores the read
|
||||
@ -151,13 +151,23 @@ private:
|
||||
/**
|
||||
* Reads the current DB version.
|
||||
* @param is_federation_slave
|
||||
* @param local_bs bootstrap local DB tables
* @param shared_bs bootstrap shared DB tables
|
||||
*
|
||||
* @return 0 on success,
|
||||
* -1 if there is a version mismatch,
|
||||
* -2 if the DB needs a bootstrap from the master
|
||||
* -3 if the DB needs a bootstrap from the slave
|
||||
* -1 if there is a version mismatch, or replica config error.
|
||||
*/
|
||||
int check_db_version(bool is_federation_slave);
|
||||
int check_db_version(bool is_slave, bool& local_bs, bool& shared_bs);
|
||||
|
||||
/**
|
||||
* check_db_version to check versioning
|
||||
* @param table name of the DB table
|
||||
* @param version target DB version
|
||||
* @return 0 success, -1 upgrade needed, -2 bootstrap needed
|
||||
*/
|
||||
int check_db_version(const string& table,
|
||||
const string& version,
|
||||
string& error);
|
||||
};
|
||||
|
||||
#endif //SYSTEM_DB_H
|
||||
|
@ -112,22 +112,7 @@ public:
|
||||
* @param error_str Returns the error reason, if any
|
||||
* @returns -1 if the password is not valid
|
||||
*/
|
||||
int set_password(const string& passwd, string& error_str)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (pass_is_valid(passwd, error_str))
|
||||
{
|
||||
password = passwd;
|
||||
invalidate_session();
|
||||
}
|
||||
else
|
||||
{
|
||||
rc = -1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
};
|
||||
int set_password(const string& passwd, string& error_str);
|
||||
|
||||
/**
|
||||
* Returns user password
|
||||
|
@ -35,4 +35,4 @@ public:
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#endif /*IMAGE_TEMPLATE_H_*/
|
||||
#endif /*USER_TEMPLATE_H_*/
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */
|
||||
/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */
|
||||
/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
|
install.sh
@ -227,6 +227,8 @@ LIB_DIRS="$LIB_LOCATION/ruby \
|
||||
$LIB_LOCATION/ruby/cloud/marketplace \
|
||||
$LIB_LOCATION/ruby/cloud/CloudAuth \
|
||||
$LIB_LOCATION/ruby/onedb \
|
||||
$LIB_LOCATION/ruby/onedb/shared \
|
||||
$LIB_LOCATION/ruby/onedb/local \
|
||||
$LIB_LOCATION/ruby/vendors \
|
||||
$LIB_LOCATION/ruby/vendors/rbvmomi \
|
||||
$LIB_LOCATION/ruby/vendors/rbvmomi/lib \
|
||||
@ -394,7 +396,9 @@ INSTALL_FILES=(
|
||||
MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes
|
||||
MAD_SH_LIB_FILES:$LIB_LOCATION/sh
|
||||
MAD_SH_LIB_FILES:$VAR_LOCATION/remotes
|
||||
ONEDB_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb
|
||||
ONEDB_FILES:$LIB_LOCATION/ruby/onedb
|
||||
ONEDB_SHARED_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/shared
|
||||
ONEDB_LOCAL_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/local
|
||||
MADS_LIB_FILES:$LIB_LOCATION/mads
|
||||
IM_PROBES_FILES:$VAR_LOCATION/remotes/im
|
||||
IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
|
||||
@ -795,7 +799,8 @@ IM_PROBES_KVM_PROBES_FILES="src/im_mad/remotes/kvm-probes.d/kvm.rb \
|
||||
src/im_mad/remotes/kvm-probes.d/poll.sh \
|
||||
src/im_mad/remotes/kvm-probes.d/name.sh \
|
||||
src/im_mad/remotes/common.d/monitor_ds.sh \
|
||||
src/im_mad/remotes/common.d/version.sh"
|
||||
src/im_mad/remotes/common.d/version.sh \
|
||||
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
|
||||
|
||||
IM_PROBES_XEN3_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \
|
||||
src/im_mad/remotes/xen.d/collectd-client.rb"
|
||||
@ -806,7 +811,8 @@ IM_PROBES_XEN3_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \
|
||||
src/im_mad/remotes/xen-probes.d/poll3.sh \
|
||||
src/im_mad/remotes/xen-probes.d/name.sh
|
||||
src/im_mad/remotes/common.d/monitor_ds.sh \
|
||||
src/im_mad/remotes/common.d/version.sh"
|
||||
src/im_mad/remotes/common.d/version.sh \
|
||||
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
|
||||
|
||||
IM_PROBES_XEN4_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \
|
||||
src/im_mad/remotes/xen.d/collectd-client.rb"
|
||||
@ -817,7 +823,8 @@ IM_PROBES_XEN4_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \
|
||||
src/im_mad/remotes/xen-probes.d/poll4.sh \
|
||||
src/im_mad/remotes/xen-probes.d/name.sh \
|
||||
src/im_mad/remotes/common.d/monitor_ds.sh \
|
||||
src/im_mad/remotes/common.d/version.sh"
|
||||
src/im_mad/remotes/common.d/version.sh \
|
||||
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
|
||||
|
||||
IM_PROBES_VMWARE_FILES="src/im_mad/remotes/vmware.d/vmware.rb"
|
||||
|
||||
@ -1038,41 +1045,48 @@ DATASTORE_DRIVER_CEPH_SCRIPTS="src/datastore_mad/remotes/ceph/cp \
|
||||
#-------------------------------------------------------------------------------
|
||||
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
|
||||
#-------------------------------------------------------------------------------
|
||||
ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \
|
||||
src/onedb/2.9.80_to_2.9.85.rb \
|
||||
src/onedb/2.9.85_to_2.9.90.rb \
|
||||
src/onedb/2.9.90_to_3.0.0.rb \
|
||||
src/onedb/3.0.0_to_3.1.0.rb \
|
||||
src/onedb/3.1.0_to_3.1.80.rb \
|
||||
src/onedb/3.1.80_to_3.2.0.rb \
|
||||
src/onedb/3.2.0_to_3.2.1.rb \
|
||||
src/onedb/3.2.1_to_3.3.0.rb \
|
||||
src/onedb/3.3.0_to_3.3.80.rb \
|
||||
src/onedb/3.3.80_to_3.4.0.rb \
|
||||
src/onedb/3.4.0_to_3.4.1.rb \
|
||||
src/onedb/3.4.1_to_3.5.80.rb \
|
||||
src/onedb/3.5.80_to_3.6.0.rb \
|
||||
src/onedb/3.6.0_to_3.7.80.rb \
|
||||
src/onedb/3.7.80_to_3.8.0.rb \
|
||||
src/onedb/3.8.0_to_3.8.1.rb \
|
||||
src/onedb/3.8.1_to_3.8.2.rb \
|
||||
src/onedb/3.8.2_to_3.8.3.rb \
|
||||
src/onedb/3.8.3_to_3.8.4.rb \
|
||||
src/onedb/3.8.4_to_3.8.5.rb \
|
||||
src/onedb/3.8.5_to_3.9.80.rb \
|
||||
src/onedb/3.9.80_to_3.9.90.rb \
|
||||
src/onedb/3.9.90_to_4.0.0.rb \
|
||||
src/onedb/4.0.0_to_4.0.1.rb \
|
||||
src/onedb/4.0.1_to_4.1.80.rb \
|
||||
src/onedb/4.1.80_to_4.2.0.rb \
|
||||
src/onedb/4.2.0_to_4.3.80.rb \
|
||||
src/onedb/4.3.80_to_4.3.85.rb \
|
||||
src/onedb/4.3.85_to_4.3.90.rb \
|
||||
src/onedb/4.3.90_to_4.4.0.rb \
|
||||
src/onedb/fsck.rb \
|
||||
src/onedb/import_slave.rb \
|
||||
src/onedb/onedb.rb \
|
||||
src/onedb/onedb_backend.rb"
|
||||
|
||||
|
||||
ONEDB_FILES="src/onedb/fsck.rb \
|
||||
src/onedb/import_slave.rb \
|
||||
src/onedb/onedb.rb \
|
||||
src/onedb/onedb_backend.rb"
|
||||
|
||||
ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \
|
||||
src/onedb/shared/2.9.80_to_2.9.85.rb \
|
||||
src/onedb/shared/2.9.85_to_2.9.90.rb \
|
||||
src/onedb/shared/2.9.90_to_3.0.0.rb \
|
||||
src/onedb/shared/3.0.0_to_3.1.0.rb \
|
||||
src/onedb/shared/3.1.0_to_3.1.80.rb \
|
||||
src/onedb/shared/3.1.80_to_3.2.0.rb \
|
||||
src/onedb/shared/3.2.0_to_3.2.1.rb \
|
||||
src/onedb/shared/3.2.1_to_3.3.0.rb \
|
||||
src/onedb/shared/3.3.0_to_3.3.80.rb \
|
||||
src/onedb/shared/3.3.80_to_3.4.0.rb \
|
||||
src/onedb/shared/3.4.0_to_3.4.1.rb \
|
||||
src/onedb/shared/3.4.1_to_3.5.80.rb \
|
||||
src/onedb/shared/3.5.80_to_3.6.0.rb \
|
||||
src/onedb/shared/3.6.0_to_3.7.80.rb \
|
||||
src/onedb/shared/3.7.80_to_3.8.0.rb \
|
||||
src/onedb/shared/3.8.0_to_3.8.1.rb \
|
||||
src/onedb/shared/3.8.1_to_3.8.2.rb \
|
||||
src/onedb/shared/3.8.2_to_3.8.3.rb \
|
||||
src/onedb/shared/3.8.3_to_3.8.4.rb \
|
||||
src/onedb/shared/3.8.4_to_3.8.5.rb \
|
||||
src/onedb/shared/3.8.5_to_3.9.80.rb \
|
||||
src/onedb/shared/3.9.80_to_3.9.90.rb \
|
||||
src/onedb/shared/3.9.90_to_4.0.0.rb \
|
||||
src/onedb/shared/4.0.0_to_4.0.1.rb \
|
||||
src/onedb/shared/4.0.1_to_4.1.80.rb \
|
||||
src/onedb/shared/4.1.80_to_4.2.0.rb \
|
||||
src/onedb/shared/4.2.0_to_4.3.80.rb \
|
||||
src/onedb/shared/4.3.80_to_4.3.85.rb \
|
||||
src/onedb/shared/4.3.85_to_4.3.90.rb \
|
||||
src/onedb/shared/4.3.90_to_4.4.0.rb \
|
||||
src/onedb/shared/4.4.0_to_4.4.1.rb \
|
||||
src/onedb/shared/4.4.1_to_4.5.80.rb"
|
||||
|
||||
ONEDB_LOCAL_MIGRATOR_FILES=""
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Configuration files for OpenNebula, to be installed under $ETC_LOCATION
|
||||
|
@ -176,11 +176,14 @@ MAC_PREFIX = "02:00"
|
||||
# CDROM Image file holding a CDROM
|
||||
# DATABLOCK Image file holding a datablock,
|
||||
# always created as an empty block
|
||||
#
|
||||
# DEFAULT_DEVICE_PREFIX: This can be set to
|
||||
# hd IDE prefix
|
||||
# sd SCSI
|
||||
# xvd XEN Virtual Disk
|
||||
# vd KVM virtual disk
|
||||
#
|
||||
# DEFAULT_CDROM_DEVICE_PREFIX: Same as above but for CDROM devices.
|
||||
#*******************************************************************************
|
||||
|
||||
#DATASTORE_LOCATION = /var/lib/one/datastores
|
||||
@ -192,6 +195,7 @@ DATASTORE_CAPACITY_CHECK = "yes"
|
||||
DEFAULT_IMAGE_TYPE = "OS"
|
||||
DEFAULT_DEVICE_PREFIX = "hd"
|
||||
|
||||
DEFAULT_CDROM_DEVICE_PREFIX = "hd"
|
||||
|
||||
#*******************************************************************************
|
||||
# Information Driver Configuration
|
||||
@ -689,6 +693,7 @@ IMAGE_RESTRICTED_ATTR = "SOURCE"
|
||||
INHERIT_DATASTORE_ATTR = "CEPH_HOST"
|
||||
INHERIT_DATASTORE_ATTR = "CEPH_SECRET"
|
||||
INHERIT_DATASTORE_ATTR = "CEPH_USER"
|
||||
INHERIT_DATASTORE_ATTR = "RBD_FORMAT"
|
||||
|
||||
INHERIT_DATASTORE_ATTR = "GLUSTER_HOST"
|
||||
INHERIT_DATASTORE_ATTR = "GLUSTER_VOLUME"
|
||||
|
@ -21,7 +21,7 @@ require 'tmpdir'
|
||||
|
||||
|
||||
DEFAULTS={
|
||||
:version => "4.5.0",
|
||||
:version => "4.5.80",
|
||||
:date => Time.now.strftime("%Y-%m-%d"),
|
||||
:dependencies => []
|
||||
}
|
||||
|
@ -16,7 +16,7 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
VERSION=${VERSION:-4.5.0}
|
||||
VERSION=${VERSION:-4.5.80}
|
||||
MAINTAINER=${MAINTAINER:-C12G Labs <support@c12g.com>}
|
||||
LICENSE=${LICENSE:-Apache 2.0}
|
||||
PACKAGE_NAME=${PACKAGE_NAME:-one-context}
|
||||
|
@ -51,11 +51,9 @@ int AclManager::init_cb(void *nil, int num, char **values, char **names)
|
||||
AclManager::AclManager(
|
||||
SqlDB * _db,
|
||||
int _zone_id,
|
||||
bool _is_federation_enabled,
|
||||
bool _is_federation_slave,
|
||||
time_t _timer_period)
|
||||
:zone_id(_zone_id), db(_db), lastOID(-1),
|
||||
is_federation_enabled(_is_federation_enabled),
|
||||
is_federation_slave(_is_federation_slave), timer_period(_timer_period)
|
||||
{
|
||||
ostringstream oss;
|
||||
@ -85,32 +83,25 @@ AclManager::AclManager(
|
||||
string error_str;
|
||||
|
||||
// Users in group USERS can create standard resources
|
||||
// @1 VM+NET+IMAGE+TEMPLATE/* CREATE
|
||||
// @1 VM+NET+IMAGE+TEMPLATE+DOCUMENT/* CREATE #<local-zone>
|
||||
add_rule(AclRule::GROUP_ID |
|
||||
1,
|
||||
AclRule::ALL_ID |
|
||||
PoolObjectSQL::VM |
|
||||
PoolObjectSQL::NET |
|
||||
PoolObjectSQL::IMAGE |
|
||||
PoolObjectSQL::TEMPLATE,
|
||||
AuthRequest::CREATE,
|
||||
AclRule::ALL_ID,
|
||||
error_str);
|
||||
|
||||
// Users in USERS can deploy VMs in any HOST
|
||||
// @1 HOST/* MANAGE
|
||||
add_rule(AclRule::GROUP_ID |
|
||||
1,
|
||||
AclRule::ALL_ID |
|
||||
PoolObjectSQL::HOST,
|
||||
AuthRequest::MANAGE,
|
||||
AclRule::ALL_ID,
|
||||
error_str);
|
||||
|
||||
add_rule(AclRule::ALL_ID,
|
||||
AclRule::ALL_ID |
|
||||
PoolObjectSQL::TEMPLATE |
|
||||
PoolObjectSQL::DOCUMENT,
|
||||
AuthRequest::CREATE,
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
error_str);
|
||||
|
||||
// * ZONE/* USE *
|
||||
add_rule(AclRule::ALL_ID,
|
||||
AclRule::ALL_ID |
|
||||
PoolObjectSQL::ZONE,
|
||||
AuthRequest::USE,
|
||||
AclRule::ALL_ID,
|
||||
error_str);
|
||||
}
|
||||
@ -529,11 +520,6 @@ int AclManager::add_rule(long long user, long long resource, long long rights,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!is_federation_enabled)
|
||||
{
|
||||
zone = AclRule::INDIVIDUAL_ID | zone_id;
|
||||
}
|
||||
|
||||
lock();
|
||||
|
||||
if (lastOID == INT_MAX)
|
||||
@ -799,6 +785,18 @@ void AclManager::del_cid_rules(int cid)
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void AclManager::del_zid_rules(int zid)
|
||||
{
|
||||
long long request = AclRule::INDIVIDUAL_ID | zid;
|
||||
|
||||
// Delete rules that match
|
||||
// __ __/__ __ #zid
|
||||
del_zone_matching_rules(request);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void AclManager::del_resource_rules(int oid, PoolObjectSQL::ObjectType obj_type)
|
||||
{
|
||||
long long request = obj_type |
|
||||
@ -876,6 +874,35 @@ void AclManager::del_resource_matching_rules(long long resource_req,
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void AclManager::del_zone_matching_rules(long long zone_req)
|
||||
{
|
||||
multimap<long long, AclRule *>::iterator it;
|
||||
|
||||
vector<int> oids;
|
||||
vector<int>::iterator oid_it;
|
||||
string error_str;
|
||||
|
||||
lock();
|
||||
|
||||
for ( it = acl_rules.begin(); it != acl_rules.end(); it++ )
|
||||
{
|
||||
if ( it->second->zone == zone_req )
|
||||
{
|
||||
oids.push_back(it->second->oid);
|
||||
}
|
||||
}
|
||||
|
||||
unlock();
|
||||
|
||||
for ( oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ )
|
||||
{
|
||||
del_rule(*oid_it, error_str);
|
||||
}
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void AclManager::reverse_search(int uid,
|
||||
const set<int>& user_groups,
|
||||
PoolObjectSQL::ObjectType obj_type,
|
||||
|
@ -14,6 +14,8 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'csv'
|
||||
|
||||
module CLIHelper
|
||||
LIST = {
|
||||
:name => "list",
|
||||
@ -23,6 +25,12 @@ module CLIHelper
|
||||
:description => "Selects columns to display with list command"
|
||||
}
|
||||
|
||||
CSV = {
|
||||
:name => "csv",
|
||||
:large => "--csv",
|
||||
:description => "Write table in csv format"
|
||||
}
|
||||
|
||||
#ORDER = {
|
||||
# :name => "order",
|
||||
# :short => "-o x,y,z",
|
||||
@ -56,7 +64,7 @@ module CLIHelper
|
||||
}
|
||||
|
||||
#OPTIONS = [LIST, ORDER, FILTER, HEADER, DELAY]
|
||||
OPTIONS = [LIST, DELAY, FILTER]
|
||||
OPTIONS = [LIST, DELAY, FILTER, CSV]
|
||||
|
||||
# Sets bold font
|
||||
def CLIHelper.scr_bold
|
||||
@ -154,6 +162,8 @@ module CLIHelper
|
||||
class ShowTable
|
||||
require 'yaml'
|
||||
|
||||
attr_reader :default_columns
|
||||
|
||||
def initialize(conf=nil, ext=nil, &block)
|
||||
@columns = Hash.new
|
||||
@default_columns = Array.new
|
||||
@ -241,7 +251,7 @@ module CLIHelper
|
||||
private
|
||||
|
||||
def print_table(data, options)
|
||||
CLIHelper.print_header(header_str)
|
||||
CLIHelper.print_header(header_str) if !options[:csv]
|
||||
data ? print_data(data, options) : puts
|
||||
end
|
||||
|
||||
@ -257,17 +267,24 @@ module CLIHelper
|
||||
end
|
||||
|
||||
begin
|
||||
res_data.each{|l|
|
||||
puts (0..ncolumns-1).collect{ |i|
|
||||
dat=l[i]
|
||||
col=@default_columns[i]
|
||||
if options[:csv]
|
||||
CSV($stdout, :write_headers => true,
|
||||
:headers => @default_columns) do |csv|
|
||||
res_data.each {|l| csv << l }
|
||||
end
|
||||
else
|
||||
res_data.each{|l|
|
||||
puts (0..ncolumns-1).collect{ |i|
|
||||
dat=l[i]
|
||||
col=@default_columns[i]
|
||||
|
||||
str=format_str(col, dat)
|
||||
str=CLIHelper.color_state(str) if i==stat_column
|
||||
str=format_str(col, dat)
|
||||
str=CLIHelper.color_state(str) if i==stat_column
|
||||
|
||||
str
|
||||
}.join(' ').rstrip
|
||||
}
|
||||
str
|
||||
}.join(' ').rstrip
|
||||
}
|
||||
end
|
||||
rescue Errno::EPIPE
|
||||
end
|
||||
end
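The new --csv branch above hands the already-formatted rows straight to Ruby's csv library instead of the column formatter. A minimal, self-contained sketch of that output path; the header and row values here are made-up sample data, not output of any real command:

    #!/usr/bin/env ruby
    # Standalone sketch of the --csv branch: dump rows to stdout with a header
    # line, using the same CSV($stdout, ...) block form as the code above.
    require 'csv'

    headers = %w(ID NAME ENDPOINT)                          # sample columns
    rows    = [%w(0 OpenNebula http://localhost:2633/RPC2),
               %w(104 zone-b http://zone-b:2633/RPC2)]

    CSV($stdout, :write_headers => true, :headers => headers) do |csv|
      rows.each {|r| csv << r }
    end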
|
||||
|
@ -100,19 +100,31 @@ EOT
|
||||
:name => 'user',
|
||||
:large => '--user name',
|
||||
:description => 'User name used to connect to OpenNebula',
|
||||
:format => String
|
||||
:format => String,
|
||||
:proc => lambda do |o, options|
|
||||
OneHelper.set_user(o)
|
||||
[0, o]
|
||||
end
|
||||
},
|
||||
{
|
||||
:name => 'password',
|
||||
:large => '--password password',
|
||||
:description => 'Password to authenticate with OpenNebula',
|
||||
:format => String
|
||||
:format => String,
|
||||
:proc => lambda do |o, options|
|
||||
OneHelper.set_password(o)
|
||||
[0, o]
|
||||
end
|
||||
},
|
||||
{
|
||||
:name => 'endpoint',
|
||||
:large => '--endpoint endpoint',
|
||||
:description => 'URL of OpenNebula xmlrpc frontend',
|
||||
:format => String
|
||||
:format => String,
|
||||
:proc => lambda do |o, options|
|
||||
OneHelper.set_endpoint(o)
|
||||
[0, o]
|
||||
end
|
||||
}
|
||||
]
|
||||
|
||||
@ -340,18 +352,31 @@ EOT
|
||||
class OneHelper
|
||||
attr_accessor :client
|
||||
|
||||
def self.get_client(options)
|
||||
if defined?(@@client)
|
||||
def self.get_client(options={}, force=false)
|
||||
if !force && defined?(@@client)
|
||||
@@client
|
||||
else
|
||||
|
||||
secret=nil
|
||||
user=options[:user]
|
||||
password=nil
|
||||
|
||||
if defined?(@@user)
|
||||
user=@@user
|
||||
password=@@password if defined?(@@password)
|
||||
else
|
||||
user=options[:user]
|
||||
end
|
||||
|
||||
if user
|
||||
password=options[:password]||self.get_password
|
||||
password=password||options[:password]||self.get_password
|
||||
secret="#{user}:#{password}"
|
||||
end
|
||||
|
||||
endpoint=options[:endpoint]
|
||||
if defined?(@@endpoint)
|
||||
endpoint=@@endpoint
|
||||
else
|
||||
endpoint=options[:endpoint]
|
||||
end
|
||||
|
||||
@@client=OpenNebula::Client.new(secret, endpoint)
|
||||
end
|
||||
@ -361,10 +386,22 @@ EOT
|
||||
if defined?(@@client)
|
||||
@@client
|
||||
else
|
||||
self.get_client({})
|
||||
self.get_client
|
||||
end
|
||||
end
|
||||
|
||||
def self.set_user(user)
|
||||
@@user=user
|
||||
end
|
||||
|
||||
def self.set_password(password)
|
||||
@@password=password
|
||||
end
|
||||
|
||||
def self.set_endpoint(endpoint)
|
||||
@@endpoint=endpoint
|
||||
end
|
||||
|
||||
if RUBY_VERSION>="1.9.3"
|
||||
require 'io/console'
|
||||
def self.get_password
|
||||
@ -374,6 +411,7 @@ EOT
|
||||
puts
|
||||
|
||||
pass.chop! if pass
|
||||
@@password=pass
|
||||
pass
|
||||
end
|
||||
else
|
||||
@ -381,8 +419,9 @@ EOT
|
||||
def self.get_password
|
||||
print "Password: "
|
||||
system("stty", "-echo")
|
||||
@@password=gets.chop
|
||||
begin
|
||||
return gets.chop
|
||||
return @@password
|
||||
ensure
|
||||
system("stty", "echo")
|
||||
print "\n"
|
||||
@ -397,7 +436,7 @@ EOT
|
||||
end
|
||||
|
||||
def set_client(options)
|
||||
@client=OpenNebulaHelper::OneHelper.get_client(options)
|
||||
@client=OpenNebulaHelper::OneHelper.get_client(options, true)
|
||||
end
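Taken together, the option :proc handlers, the new set_user/set_password/set_endpoint class methods and the force flag on get_client turn the XML-RPC client into a memoized class-level object that --user, --password and --endpoint can override. The following is a reduced Ruby sketch of that caching pattern only; the Client struct is a stand-in so the snippet runs on its own, it is not the real OpenNebula::Client:

    # Stub standing in for OpenNebula::Client, just to make the sketch runnable.
    Client = Struct.new(:secret, :endpoint)

    class Helper
      def self.set_user(u)     ; @@user     = u ; end
      def self.set_password(p) ; @@password = p ; end
      def self.set_endpoint(e) ; @@endpoint = e ; end

      # Build the client once and cache it; pass force=true to rebuild it,
      # e.g. after --user/--password changed the credentials.
      def self.get_client(options = {}, force = false)
        return @@client if !force && defined?(@@client)

        user     = defined?(@@user)     ? @@user     : options[:user]
        password = defined?(@@password) ? @@password : options[:password]
        endpoint = defined?(@@endpoint) ? @@endpoint : options[:endpoint]

        secret   = "#{user}:#{password}" if user
        @@client = Client.new(secret, endpoint)
      end
    end

    Helper.set_user('oneadmin')
    Helper.set_password('opennebula')
    p Helper.get_client                       # built from the class variables
    p Helper.get_client({:user => 'other'})   # still the cached client
    p Helper.get_client({}, true)             # force: rebuilt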
|
||||
|
||||
def create_resource(options, &block)
|
||||
@ -565,7 +604,7 @@ EOT
|
||||
end
|
||||
|
||||
def self.name_to_id(name, pool, ename)
|
||||
if ename=="CLUSTER" and name=="ALL"
|
||||
if ename=="CLUSTER" and name.upcase=="ALL"
|
||||
return 0, "ALL"
|
||||
end
|
||||
|
||||
|
@ -87,7 +87,7 @@ class AcctHelper < OpenNebulaHelper::OneHelper
|
||||
:name => "json",
|
||||
:short => "-j",
|
||||
:large => "--json",
|
||||
:description => "Show the resource in xml format"
|
||||
:description => "Show the resource in json format"
|
||||
}
|
||||
|
||||
SPLIT={
|
||||
@ -100,6 +100,10 @@ class AcctHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
|
||||
ACCT_TABLE = CLIHelper::ShowTable.new("oneacct.yaml", nil) do
|
||||
column :UID, "User ID", :size=>4 do |d|
|
||||
d["UID"]
|
||||
end
|
||||
|
||||
column :VID, "Virtual Machine ID", :size=>4 do |d|
|
||||
d["OID"]
|
||||
end
|
||||
@ -181,4 +185,4 @@ class AcctHelper < OpenNebulaHelper::OneHelper
|
||||
CLIHelper.scr_restore
|
||||
puts
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -41,6 +41,10 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
exit_code , msg = group.create_default_acls
|
||||
|
||||
if OpenNebula.is_error?(exit_code)
|
||||
return -1, exit_code.message
|
||||
end
|
||||
|
||||
exit_code.to_i
|
||||
end
|
||||
|
||||
@ -62,21 +66,27 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper
|
||||
def format_pool(options)
|
||||
config_file = self.class.table_conf
|
||||
|
||||
prefix = '/GROUP_POOL/DEFAULT_GROUP_QUOTAS/'
|
||||
group_pool = @group_pool
|
||||
|
||||
quotas = group_pool.get_hash()['GROUP_POOL']['QUOTAS']
|
||||
quotas_hash = Hash.new
|
||||
|
||||
if (!quotas.nil?)
|
||||
quotas = [quotas].flatten
|
||||
|
||||
quotas.each do |q|
|
||||
quotas_hash[q['ID']] = q
|
||||
end
|
||||
end
|
||||
|
||||
table = CLIHelper::ShowTable.new(config_file, self) do
|
||||
def pool_default_quotas(path)
|
||||
@data.dsearch('/GROUP_POOL/DEFAULT_GROUP_QUOTAS/'+path)
|
||||
end
|
||||
|
||||
def quotas
|
||||
if !defined?(@quotas)
|
||||
quotas = @data.dsearch('GROUP_POOL/QUOTAS')
|
||||
@quotas = Hash.new
|
||||
|
||||
if (!quotas.nil?)
|
||||
quotas = [quotas].flatten
|
||||
|
||||
quotas.each do |q|
|
||||
@quotas[q['ID']] = q
|
||||
end
|
||||
end
|
||||
end
|
||||
@quotas
|
||||
end
|
||||
|
||||
column :ID, "ONE identifier for the Group", :size=>4 do |d|
|
||||
d["ID"]
|
||||
end
|
||||
@ -100,11 +110,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
column :VMS , "Number of VMS", :size=>9 do |d|
|
||||
begin
|
||||
q = quotas_hash[d['ID']]
|
||||
q = quotas[d['ID']]
|
||||
limit = q['VM_QUOTA']['VM']["VMS"]
|
||||
|
||||
if limit == "-1"
|
||||
limit = group_pool["#{prefix}VM_QUOTA/VM/VMS"]
|
||||
limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/VMS"]
|
||||
limit = "0" if limit.nil? || limit == ""
|
||||
end
|
||||
|
||||
@ -117,11 +127,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
column :MEMORY, "Total memory allocated to user VMs", :size=>17 do |d|
|
||||
begin
|
||||
q = quotas_hash[d['ID']]
|
||||
q = quotas[d['ID']]
|
||||
limit = q['VM_QUOTA']['VM']["MEMORY"]
|
||||
|
||||
if limit == "-1"
|
||||
limit = group_pool["#{prefix}VM_QUOTA/VM/MEMORY"]
|
||||
limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/MEMORY"]
|
||||
limit = "0" if limit.nil? || limit == ""
|
||||
end
|
||||
|
||||
@ -135,11 +145,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
column :CPU, "Total CPU allocated to user VMs", :size=>11 do |d|
|
||||
begin
|
||||
q = quotas_hash[d['ID']]
|
||||
q = quotas[d['ID']]
|
||||
limit = q['VM_QUOTA']['VM']["CPU"]
|
||||
|
||||
if limit == "-1"
|
||||
limit = group_pool["#{prefix}VM_QUOTA/VM/CPU"]
|
||||
limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/CPU"]
|
||||
limit = "0" if limit.nil? || limit == ""
|
||||
end
|
||||
|
||||
@ -213,6 +223,10 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper
|
||||
puts str % ["NAME", group.name]
|
||||
puts
|
||||
|
||||
CLIHelper.print_header(str_h1 % "GROUP TEMPLATE",false)
|
||||
puts group.template_str
|
||||
puts
|
||||
|
||||
CLIHelper.print_header(str_h1 % "USERS", false)
|
||||
CLIHelper.print_header("%-15s" % ["ID"])
|
||||
group.user_ids.each do |uid|
|
||||
|
@ -307,7 +307,7 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
|
||||
str="#{bar} #{info} "
|
||||
name=host[0..(79-str.length)]
|
||||
str=str+name
|
||||
str=str+" "*(79-str.length)
|
||||
str=str+" "*(80-str.length)
|
||||
|
||||
print "#{str}\r"
|
||||
STDOUT.flush
|
||||
|
@ -1,5 +1,5 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
@ -43,11 +43,11 @@ class OneZoneHelper < OpenNebulaHelper::OneHelper
|
||||
d["NAME"]
|
||||
end
|
||||
|
||||
column :ENDPOINT, "Endpoint of the Zone", :left, :size=>50 do |d|
|
||||
column :ENDPOINT, "Endpoint of the Zone", :left, :size=>45 do |d|
|
||||
d["TEMPLATE"]['ENDPOINT']
|
||||
end
|
||||
|
||||
default :ID, :NAME, :ENDPOINT
|
||||
default :CURRENT, :ID, :NAME, :ENDPOINT
|
||||
end
|
||||
|
||||
table
|
||||
|
@ -44,9 +44,15 @@ cmd = CommandParser::CmdParser.new(ARGV) do
|
||||
end
|
||||
|
||||
option AcctHelper::ACCT_OPTIONS + CommandParser::OPTIONS +
|
||||
[OpenNebulaHelper::DESCRIBE, CLIHelper::LIST, CLIHelper::CSV] +
|
||||
OpenNebulaHelper::CLIENT_OPTIONS
|
||||
|
||||
main do
|
||||
if options[:describe]
|
||||
AcctHelper::ACCT_TABLE.describe_columns
|
||||
exit(0)
|
||||
end
|
||||
|
||||
filter_flag = (options[:userfilter] || VirtualMachinePool::INFO_ALL)
|
||||
start_time = options[:start_time] ? options[:start_time].to_i : -1
|
||||
end_time = options[:end_time] ? options[:end_time].to_i : -1
|
||||
@ -81,7 +87,10 @@ cmd = CommandParser::CmdParser.new(ARGV) do
|
||||
else
|
||||
order_by = Hash.new
|
||||
order_by[:order_by_1] = 'VM/UID'
|
||||
order_by[:order_by_2] = 'VM/ID' if options[:split]
|
||||
|
||||
if options[:split] && !options[:csv]
|
||||
order_by[:order_by_2] = 'VM/ID'
|
||||
end
|
||||
|
||||
acct_hash = pool.accounting(filter_flag,
|
||||
common_opts.merge(order_by))
|
||||
@ -90,6 +99,21 @@ cmd = CommandParser::CmdParser.new(ARGV) do
|
||||
exit -1
|
||||
end
|
||||
|
||||
if options[:csv]
|
||||
a=Array.new
|
||||
acct_hash.each do |user_id, value|
|
||||
value['HISTORY_RECORDS']['HISTORY'].each do |l|
|
||||
l['UID']=user_id
|
||||
a << l
|
||||
end
|
||||
end
|
||||
|
||||
cols=AcctHelper::ACCT_TABLE.default_columns
|
||||
AcctHelper::ACCT_TABLE.default(:UID, *cols)
|
||||
|
||||
AcctHelper::ACCT_TABLE.show(a, options)
|
||||
exit(0)
|
||||
end
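For the --csv case the accounting hash, keyed by user id, is flattened into one row per history record with the user id injected as a UID column, and the whole set is printed through a single table. A hedged standalone sketch of that flattening, assuming acct_hash keeps the nested 'HISTORY_RECORDS'/'HISTORY' layout used above (the sample record is invented):

    # acct_hash: { user_id => { 'HISTORY_RECORDS' => { 'HISTORY' => [record, ...] } } }
    # Flatten to an array of records, tagging each one with its owner's UID.
    # The extra [..].flatten guards against a single record arriving as a Hash
    # instead of an Array, a common quirk of XML-to-hash conversion.
    def flatten_acct(acct_hash)
      rows = []
      acct_hash.each do |user_id, value|
        [value['HISTORY_RECORDS']['HISTORY']].flatten.each do |rec|
          rec['UID'] = user_id
          rows << rec
        end
      end
      rows
    end

    sample = { '2' => { 'HISTORY_RECORDS' => { 'HISTORY' => [{ 'OID' => '7', 'HOSTNAME' => 'kvm01' }] } } }
    p flatten_acct(sample)  # => [{"OID"=>"7", "HOSTNAME"=>"kvm01", "UID"=>"2"}]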
|
||||
|
||||
if ( start_time != -1 or end_time != -1 )
|
||||
AcctHelper.print_start_end_time_header(start_time, end_time)
|
||||
@ -102,13 +126,13 @@ cmd = CommandParser::CmdParser.new(ARGV) do
|
||||
# Use one table for each VM
|
||||
value.each { |vm_id, history_array|
|
||||
array = history_array['HISTORY_RECORDS']['HISTORY']
|
||||
AcctHelper::ACCT_TABLE.show(array)
|
||||
AcctHelper::ACCT_TABLE.show(array, options)
|
||||
puts
|
||||
}
|
||||
else
|
||||
# Use the same table for all the VMs
|
||||
array = value['HISTORY_RECORDS']['HISTORY']
|
||||
AcctHelper::ACCT_TABLE.show(array)
|
||||
AcctHelper::ACCT_TABLE.show(array, options)
|
||||
puts
|
||||
end
|
||||
}
|
||||
|
@ -135,6 +135,24 @@ cmd=CommandParser::CmdParser.new(ARGV) do
|
||||
end
|
||||
end
|
||||
|
||||
update_desc = <<-EOT.unindent
|
||||
Update the template contents. If a path is not provided, the editor will
|
||||
be launched to modify the current content.
|
||||
EOT
|
||||
|
||||
command :update, update_desc, :groupid, [:file, nil],
|
||||
:options=>OpenNebulaHelper::APPEND do
|
||||
helper.perform_action(args[0],options,"modified") do |obj|
|
||||
if options[:append]
|
||||
str = OpenNebulaHelper.append_template(args[0], obj, args[1])
|
||||
else
|
||||
str = OpenNebulaHelper.update_template(args[0], obj, args[1])
|
||||
end
|
||||
|
||||
obj.update(str, options[:append])
|
||||
end
|
||||
end
|
||||
|
||||
delete_desc = <<-EOT.unindent
|
||||
Deletes the given Group
|
||||
EOT
|
||||
|
@ -20,7 +20,7 @@ ONE_LOCATION=ENV["ONE_LOCATION"]
|
||||
|
||||
if !ONE_LOCATION
|
||||
RUBY_LIB_LOCATION="/usr/lib/one/ruby"
|
||||
REMOTES_LOCATION="/var/lib/one/remotes"
|
||||
REMOTES_LOCATION="/var/lib/one/remotes/"
|
||||
else
|
||||
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby"
|
||||
REMOTES_LOCATION=ONE_LOCATION+"/var/remotes/"
|
||||
|
@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
|
@ -50,7 +50,7 @@ end
|
||||
module CloudClient
|
||||
|
||||
# OpenNebula version
|
||||
VERSION = '4.5.0'
|
||||
VERSION = '4.5.80'
|
||||
|
||||
# #########################################################################
|
||||
# Default location for the authentication file
|
||||
|
@ -88,7 +88,7 @@ module Keypair
|
||||
erb_private_key = rsa_kp
|
||||
erb_public_key = rsa_kp.public_key
|
||||
|
||||
erb_key_fingerprint = Digest::MD5.hexdigest(rsa_kp.to_der)
|
||||
erb_key_fingerprint = Digest::MD5.hexdigest(rsa_kp.to_blob)
|
||||
erb_key_fingerprint.gsub!(/(.{2})(?=.)/, '\1:\2')
|
||||
|
||||
erb_ssh_public_key = erb_public_key.ssh_type <<
|
||||
|
@ -118,18 +118,7 @@ string& Cluster::get_ds_location(string &ds_location)
|
||||
|
||||
int Cluster::add_datastore(int id, Datastore::DatastoreType ds_type, string& error_msg)
|
||||
{
|
||||
if ( id == DatastorePool::SYSTEM_DS_ID )
|
||||
{
|
||||
ostringstream oss;
|
||||
oss << "Datastore "<< DatastorePool::SYSTEM_DS_ID
|
||||
<< " cannot be added to any cluster.";
|
||||
|
||||
error_msg = oss.str();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int rc = datastores.add_collection_id(id);
|
||||
int rc = datastores.add_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
|
@ -103,7 +103,7 @@ DatastorePool::DatastorePool(SqlDB * db):
|
||||
GroupPool::ONEADMIN_ID,
|
||||
UserPool::oneadmin_name,
|
||||
GroupPool::ONEADMIN_NAME,
|
||||
0133,
|
||||
0137,
|
||||
ds_tmpl,
|
||||
&rc,
|
||||
ClusterPool::NONE_CLUSTER_ID,
|
||||
@ -137,7 +137,7 @@ DatastorePool::DatastorePool(SqlDB * db):
|
||||
GroupPool::ONEADMIN_ID,
|
||||
UserPool::oneadmin_name,
|
||||
GroupPool::ONEADMIN_NAME,
|
||||
0133,
|
||||
0137,
|
||||
ds_tmpl,
|
||||
&rc,
|
||||
ClusterPool::NONE_CLUSTER_ID,
|
||||
@ -241,14 +241,6 @@ int DatastorePool::drop(PoolObjectSQL * objsql, string& error_msg)
|
||||
|
||||
int rc;
|
||||
|
||||
// Return error if the datastore is a default one.
|
||||
if( datastore->get_oid() < 100 )
|
||||
{
|
||||
error_msg = "System Datastores (ID < 100) cannot be deleted.";
|
||||
NebulaLog::log("DATASTORE", Log::ERROR, error_msg);
|
||||
return -2;
|
||||
}
|
||||
|
||||
if( datastore->get_collection_size() > 0 )
|
||||
{
|
||||
ostringstream oss;
|
||||
|
@ -22,3 +22,14 @@ POOL_NAME=one
|
||||
# temporarily during the create/mkfs processes. This directory MUST exist,
# have enough space and be writable by 'oneadmin'
|
||||
STAGING_DIR=/var/tmp
|
||||
|
||||
# Default RBD_FORMAT. By default RBD format 1 will be used. Uncomment the
# following option to enable support for RBD format 2. This value affects all
# Ceph datastores, but it can also be set per datastore with the same option
# in the datastore template.
# RBD_FORMAT=2

# Extra arguments sent to "qemu-img convert". Depending on the qemu-img
# version, using "-O rbd" can be recommended or can cause segfaults. Uncomment
# the following line to add "-O rbd" to the qemu-img convert command.
# QEMU_IMG_CONVERT_ARGS="-O rbd"
|
||||
|
@ -54,6 +54,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
|
||||
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \
|
||||
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \
|
||||
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/STAGING_DIR \
|
||||
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RBD_FORMAT \
|
||||
/DS_DRIVER_ACTION_DATA/IMAGE/PATH \
|
||||
/DS_DRIVER_ACTION_DATA/IMAGE/SIZE \
|
||||
/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5 \
|
||||
@ -69,6 +70,7 @@ SAFE_DIRS="${XPATH_ELEMENTS[i++]}"
|
||||
BRIDGE_LIST="${XPATH_ELEMENTS[i++]}"
|
||||
POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}"
|
||||
STAGING_DIR="${XPATH_ELEMENTS[i++]:-$STAGING_DIR}"
|
||||
RBD_FORMAT="${XPATH_ELEMENTS[i++]:-$RBD_FORMAT}"
|
||||
SRC="${XPATH_ELEMENTS[i++]}"
|
||||
SIZE="${XPATH_ELEMENTS[i++]}"
|
||||
MD5="${XPATH_ELEMENTS[i++]}"
|
||||
@ -121,8 +123,18 @@ exec_and_log "eval $DUMP | $SSH $DST_HOST $DD of=$TMP_DST bs=64k" \
|
||||
REGISTER_CMD=$(cat <<EOF
|
||||
set -e
|
||||
|
||||
# create rbd
|
||||
$QEMU_IMG convert $TMP_DST rbd:$RBD_SOURCE
|
||||
if [ "$RBD_FORMAT" = "2" ]; then
|
||||
FORMAT=\$($QEMU_IMG info $TMP_DST | grep "^file format:" | awk '{print $3}')
|
||||
|
||||
if [ "\$FORMAT" != "raw" ]; then
|
||||
$QEMU_IMG convert -O raw $TMP_DST $TMP_DST.raw
|
||||
mv $TMP_DST.raw $TMP_DST
|
||||
fi
|
||||
|
||||
$RBD import --format 2 $TMP_DST $RBD_SOURCE
|
||||
else
|
||||
$QEMU_IMG convert $QEMU_IMG_CONVERT_ARGS $TMP_DST rbd:$RBD_SOURCE
|
||||
fi
|
||||
|
||||
# remove original
|
||||
$RM -f $TMP_DST
|
||||
|
@ -96,15 +96,19 @@ REGISTER_CMD=$(cat <<EOF
|
||||
set -e
|
||||
export PATH=/usr/sbin:/sbin:\$PATH
|
||||
|
||||
# create and format
|
||||
$DD if=/dev/zero of=$TMP_DST bs=1 count=1 seek=${SIZE}M
|
||||
$MKFS_CMD
|
||||
if [ "$FSTYPE" = "raw" ]; then
|
||||
$QEMU_IMG create rbd:$RBD_SOURCE ${SIZE}M
|
||||
else
|
||||
# create and format
|
||||
$DD if=/dev/zero of=$TMP_DST bs=1 count=1 seek=${SIZE}M
|
||||
$MKFS_CMD
|
||||
|
||||
# create rbd
|
||||
$QEMU_IMG convert $TMP_DST rbd:$RBD_SOURCE
|
||||
# create rbd
|
||||
$QEMU_IMG convert $QEMU_IMG_CONVERT_ARGS $TMP_DST rbd:$RBD_SOURCE
|
||||
|
||||
# remove original
|
||||
$RM -f $TMP_DST
|
||||
# remove original
|
||||
$RM -f $TMP_DST
|
||||
fi
|
||||
EOF
|
||||
)
|
||||
|
||||
|
@ -188,8 +188,10 @@ esac
|
||||
file_type=$(get_type "$command")
|
||||
decompressor=$(get_decompressor "$file_type")
|
||||
|
||||
# Note: the 'cat' at the end of this pipeline forces the pipe to wait until
|
||||
# all the 'tee' processes are finished
|
||||
$command | tee >( decompress "$decompressor" "$TO" ) \
|
||||
>( hasher $HASH_TYPE ) >/dev/null
|
||||
>( hasher $HASH_TYPE ) | cat >/dev/null
|
||||
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "Error copying" >&2
|
||||
|
@ -214,6 +214,7 @@ string& Group::to_xml_extended(string& xml, bool extended) const
|
||||
{
|
||||
ostringstream oss;
|
||||
string collection_xml;
|
||||
string template_xml;
|
||||
|
||||
set<pair<int,int> >::const_iterator it;
|
||||
|
||||
@ -221,8 +222,9 @@ string& Group::to_xml_extended(string& xml, bool extended) const
|
||||
|
||||
oss <<
|
||||
"<GROUP>" <<
|
||||
"<ID>" << oid << "</ID>" <<
|
||||
"<NAME>" << name << "</NAME>" <<
|
||||
"<ID>" << oid << "</ID>" <<
|
||||
"<NAME>" << name << "</NAME>" <<
|
||||
obj_template->to_xml(template_xml) <<
|
||||
collection_xml;
|
||||
|
||||
for (it = providers.begin(); it != providers.end(); it++)
|
||||
@ -285,6 +287,19 @@ int Group::from_xml(const string& xml)
|
||||
ObjectXML::free_nodes(content);
|
||||
content.clear();
|
||||
|
||||
// Get associated metadata for the group
|
||||
ObjectXML::get_nodes("/GROUP/TEMPLATE", content);
|
||||
|
||||
if (content.empty())
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
rc += obj_template->from_xml_node(content[0]);
|
||||
|
||||
ObjectXML::free_nodes(content);
|
||||
content.clear();
|
||||
|
||||
// Set of resource providers
|
||||
ObjectXML::get_nodes("/GROUP/RESOURCE_PROVIDER", content);
|
||||
|
||||
@ -317,6 +332,11 @@ int Group::from_xml(const string& xml)
|
||||
|
||||
int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg)
|
||||
{
|
||||
AclManager* aclm = Nebula::instance().get_aclm();
|
||||
|
||||
int rc = 0;
|
||||
long long mask_prefix;
|
||||
|
||||
pair<set<pair<int, int> >::iterator,bool> ret;
|
||||
|
||||
ret = providers.insert(pair<int,int>(zone_id, cluster_id));
|
||||
@ -327,6 +347,56 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (cluster_id == ClusterPool::ALL_RESOURCES)
|
||||
{
|
||||
mask_prefix = AclRule::ALL_ID;
|
||||
}
|
||||
else
|
||||
{
|
||||
mask_prefix = AclRule::CLUSTER_ID | cluster_id;
|
||||
}
|
||||
|
||||
// @<gid> HOST/%<cid> MANAGE #<zone>
|
||||
rc += aclm->add_rule(
|
||||
AclRule::GROUP_ID |
|
||||
oid,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::HOST,
|
||||
|
||||
AuthRequest::MANAGE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
if (rc < 0)
|
||||
{
|
||||
NebulaLog::log("GROUP",Log::ERROR,error_msg);
|
||||
}
|
||||
|
||||
// @<gid> DATASTORE+NET/%<cid> USE #<zone>
|
||||
rc += aclm->add_rule(
|
||||
AclRule::GROUP_ID |
|
||||
oid,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::DATASTORE |
|
||||
PoolObjectSQL::NET,
|
||||
|
||||
AuthRequest::USE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
if (rc < 0)
|
||||
{
|
||||
NebulaLog::log("GROUP",Log::ERROR,error_msg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -335,11 +405,68 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg)
|
||||
|
||||
int Group::del_resource_provider(int zone_id, int cluster_id, string& error_msg)
|
||||
{
|
||||
AclManager* aclm = Nebula::instance().get_aclm();
|
||||
|
||||
int rc = 0;
|
||||
|
||||
long long mask_prefix;
|
||||
|
||||
if( providers.erase(pair<int,int>(zone_id, cluster_id)) != 1 )
|
||||
{
|
||||
error_msg = "Resource provider is not assigned to this group";
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (cluster_id == ClusterPool::ALL_RESOURCES)
|
||||
{
|
||||
mask_prefix = AclRule::ALL_ID;
|
||||
}
|
||||
else
|
||||
{
|
||||
mask_prefix = AclRule::CLUSTER_ID | cluster_id;
|
||||
}
|
||||
|
||||
// @<gid> HOST/%<cid> MANAGE #<zid>
|
||||
rc += aclm->del_rule(
|
||||
AclRule::GROUP_ID |
|
||||
oid,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::HOST,
|
||||
|
||||
AuthRequest::MANAGE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
if (rc < 0)
|
||||
{
|
||||
NebulaLog::log("GROUP",Log::ERROR,error_msg);
|
||||
}
|
||||
|
||||
// @<gid> DATASTORE+NET/%<cid> USE #<zid>
|
||||
rc += aclm->del_rule(
|
||||
AclRule::GROUP_ID |
|
||||
oid,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::DATASTORE |
|
||||
PoolObjectSQL::NET,
|
||||
|
||||
AuthRequest::USE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
if (rc < 0)
|
||||
{
|
||||
NebulaLog::log("GROUP",Log::ERROR,error_msg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -77,6 +77,12 @@ GroupPool::GroupPool(SqlDB * db,
|
||||
goto error_groups;
|
||||
}
|
||||
|
||||
group = get(rc, true);
|
||||
|
||||
group->add_resource_provider(Nebula::instance().get_zone_id(), ClusterPool::ALL_RESOURCES, error_str);
|
||||
|
||||
group->unlock();
|
||||
|
||||
set_update_lastOID(99);
|
||||
}
|
||||
|
||||
|
@ -238,7 +238,9 @@ int Host::update_info(Template &tmpl,
|
||||
bool &with_vm_info,
|
||||
set<int> &lost,
|
||||
map<int,string> &found,
|
||||
const set<int> &non_shared_ds)
|
||||
const set<int> &non_shared_ds,
|
||||
long long reserved_cpu,
|
||||
long long reserved_mem)
|
||||
{
|
||||
VectorAttribute* vatt;
|
||||
vector<Attribute*>::iterator it;
|
||||
@ -289,10 +291,12 @@ int Host::update_info(Template &tmpl,
|
||||
|
||||
if (isEnabled())
|
||||
{
|
||||
get_reserved_capacity(reserved_cpu, reserved_mem);
|
||||
|
||||
erase_template_attribute("TOTALCPU", val);
|
||||
host_share.max_cpu = val;
|
||||
host_share.max_cpu = val - reserved_cpu;
|
||||
erase_template_attribute("TOTALMEMORY", val);
|
||||
host_share.max_mem = val;
|
||||
host_share.max_mem = val - reserved_mem;
|
||||
erase_template_attribute("DS_LOCATION_TOTAL_MB", val);
|
||||
host_share.max_disk = val;
|
||||
|
||||
|
@ -36,6 +36,8 @@ LifeCycleManager * MonitorThread::lcm;
|
||||
|
||||
MonitorThreadPool * MonitorThread::mthpool;
|
||||
|
||||
ClusterPool * MonitorThread::cpool;
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
@ -94,7 +96,7 @@ void MonitorThread::do_message()
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Get DS Information from Monitoring Information
// Get DS Information from Monitoring Information & Reserved Capacity
|
||||
// -------------------------------------------------------------------------
|
||||
map<int,const VectorAttribute*> datastores;
|
||||
map<int, const VectorAttribute*>::iterator itm;
|
||||
@ -104,7 +106,13 @@ void MonitorThread::do_message()
|
||||
|
||||
set<int> non_shared_ds;
|
||||
|
||||
int rc = host->extract_ds_info(*hinfo, tmpl, datastores);
|
||||
int rc = host->extract_ds_info(*hinfo, tmpl, datastores);
|
||||
|
||||
int cid = host->get_cluster_id();
|
||||
|
||||
long long reserved_cpu = 0;
|
||||
|
||||
long long reserved_mem = 0;
|
||||
|
||||
delete hinfo;
|
||||
|
||||
@ -115,6 +123,18 @@ void MonitorThread::do_message()
|
||||
return;
|
||||
}
|
||||
|
||||
if (cid != -1)
|
||||
{
|
||||
Cluster *cluster = cpool->get(cid, true);
|
||||
|
||||
if (cluster != 0)
|
||||
{
|
||||
cluster->get_reserved_capacity(reserved_cpu, reserved_mem);
|
||||
|
||||
cluster->unlock();
|
||||
}
|
||||
}
|
||||
|
||||
for (itm = datastores.begin(); itm != datastores.end(); itm++)
|
||||
{
|
||||
ds = dspool->get(itm->first, true);
|
||||
@ -170,7 +190,8 @@ void MonitorThread::do_message()
|
||||
return;
|
||||
}
|
||||
|
||||
rc = host->update_info(tmpl, vm_poll, lost, found, non_shared_ds);
|
||||
rc = host->update_info(tmpl, vm_poll, lost, found, non_shared_ds,
|
||||
reserved_cpu, reserved_mem);
|
||||
|
||||
hpool->update(host);
|
||||
|
||||
@ -220,6 +241,8 @@ MonitorThreadPool::MonitorThreadPool(int max_thr):concurrent_threads(max_thr),
|
||||
|
||||
MonitorThread::lcm = Nebula::instance().get_lcm();
|
||||
|
||||
MonitorThread::cpool = Nebula::instance().get_clpool();
|
||||
|
||||
MonitorThread::mthpool= this;
|
||||
|
||||
//Initialize concurrency variables
|
||||
|
@ -1 +1 @@
|
||||
4.5.0
|
||||
4.5.80
|
src/im_mad/remotes/common.d/collectd-client-shepherd.sh (new executable file, 29 lines)
@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
(
|
||||
|
||||
running_pid=$(cat /tmp/one-collectd-client.pid)
|
||||
pids=$(ps axuwww | grep /collectd-client.rb | grep -v grep | awk '{ print $2 }' | grep -v "^${running_pid}$")
|
||||
|
||||
if [ -n "$pids" ]; then
|
||||
kill -6 $pids
|
||||
fi
|
||||
|
||||
) > /dev/null
|
||||
|
@ -36,6 +36,7 @@ ImagePool::ImagePool(
|
||||
SqlDB * db,
|
||||
const string& __default_type,
|
||||
const string& __default_dev_prefix,
|
||||
const string& __default_cdrom_dev_prefix,
|
||||
vector<const Attribute *>& restricted_attrs,
|
||||
vector<const Attribute *> hook_mads,
|
||||
const string& remotes_location,
|
||||
@ -47,7 +48,7 @@ ImagePool::ImagePool(
|
||||
_default_type = __default_type;
|
||||
_default_dev_prefix = __default_dev_prefix;
|
||||
|
||||
_default_cdrom_dev_prefix = "hd";
|
||||
_default_cdrom_dev_prefix = __default_cdrom_dev_prefix;
|
||||
|
||||
// Init inherit attributes
|
||||
vector<const Attribute *>::const_iterator it;
|
||||
|
@ -356,7 +356,7 @@ void LifeCycleManager::do_action(const string &action, void * arg)
}
else if (action == "ATTACH_FAILURE")
{
attach_failure_action(vid);
attach_failure_action(vid, false);
}
else if (action == "DETACH_SUCCESS")
{
@ -1340,7 +1340,7 @@ void LifeCycleManager::attach_success_action(int vid)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::attach_failure_action(int vid)
void LifeCycleManager::attach_failure_action(int vid, bool release_save_as)
{
VirtualMachine * vm;
VectorAttribute * disk;
@ -1385,6 +1385,17 @@ void LifeCycleManager::attach_failure_action(int vid)
Quotas::quota_del(Quotas::IMAGE, uid, gid, &tmpl);

imagem->release_image(oid, image_id, false);

// Release non-persistent images in the detach event
if (release_save_as)
{
int save_as_id;

if ( disk->vector_value("SAVE_AS", save_as_id) == 0 )
{
imagem->release_image(oid, save_as_id, false);
}
}
}
else // Volatile disk
{
@ -1408,7 +1419,7 @@ void LifeCycleManager::attach_failure_action(int vid)

void LifeCycleManager::detach_success_action(int vid)
{
attach_failure_action(vid);
attach_failure_action(vid, true);
}

/* -------------------------------------------------------------------------- */
@ -300,66 +300,68 @@ void Nebula::start(bool bootstrap_only)
// Prepare the SystemDB and check versions
// ---------------------------------------------------------------------

bool local_bootstrap;
bool shared_bootstrap;

NebulaLog::log("ONE",Log::INFO,"Checking database version.");

system_db = new SystemDB(db);

rc = system_db->check_db_version(is_federation_slave());

rc = system_db->check_db_version(is_federation_slave(),
local_bootstrap,
shared_bootstrap);
if( rc == -1 )
{
throw runtime_error("Database version mismatch.");
throw runtime_error("Database version mismatch. Check oned.log.");
}

if( is_federation_slave() && rc == -2 )
{
throw runtime_error(
"Either the database was not bootstrapped by the "
"federation master, or the replication was "
"not configured.");
}
rc = 0;

if( rc == -2 || rc == -3 )
if (local_bootstrap)
{
rc = 0;

NebulaLog::log("ONE",Log::INFO,
"Bootstrapping OpenNebula database.");
"Bootstrapping OpenNebula database, stage 1.");

rc += VirtualMachinePool::bootstrap(db);
rc += HostPool::bootstrap(db);
rc += VirtualNetworkPool::bootstrap(db);
rc += GroupPool::bootstrap(db);
rc += UserPool::bootstrap(db);
rc += ImagePool::bootstrap(db);
rc += VMTemplatePool::bootstrap(db);
rc += AclManager::bootstrap(db);
rc += DatastorePool::bootstrap(db);
rc += ClusterPool::bootstrap(db);
rc += DocumentPool::bootstrap(db);

// Create the system tables only if bootstrap went well
if (rc == 0)
{
rc += system_db->local_bootstrap();
}
}

if (shared_bootstrap)
{
NebulaLog::log("ONE",Log::INFO,
"Bootstrapping OpenNebula database, stage 2.");

rc += GroupPool::bootstrap(db);
rc += UserPool::bootstrap(db);
rc += AclManager::bootstrap(db);
rc += ZonePool::bootstrap(db);

// Create the system tables only if bootstrap went well
if ( rc == 0 )
{
if (is_federation_slave())
{
rc += system_db->slave_bootstrap();
}
else
{
rc += system_db->bootstrap();
}
rc += system_db->shared_bootstrap();
}

// Insert default system attributes
rc += default_user_quota.insert();
rc += default_group_quota.insert();
}

if ( rc != 0 )
{
throw runtime_error("Error bootstrapping database.");
}
if ( rc != 0 )
{
throw runtime_error("Error bootstrapping database.");
}
}
catch (exception&)
@ -377,6 +379,59 @@ void Nebula::start(bool bootstrap_only)
return;
}

// -----------------------------------------------------------
// Close stds, we no longer need them
// -----------------------------------------------------------

fd = open("/dev/null", O_RDWR);

dup2(fd,0);
dup2(fd,1);
dup2(fd,2);

close(fd);

fcntl(0,F_SETFD,0); // Keep them open across exec funcs
fcntl(1,F_SETFD,0);
fcntl(2,F_SETFD,0);

// -----------------------------------------------------------
// Block all signals before creating any Nebula thread
// -----------------------------------------------------------

sigfillset(&mask);

pthread_sigmask(SIG_BLOCK, &mask, NULL);

// -----------------------------------------------------------
//Managers
// -----------------------------------------------------------

MadManager::mad_manager_system_init();

time_t timer_period;
time_t monitor_period;

nebula_configuration->get("MANAGER_TIMER", timer_period);
nebula_configuration->get("MONITORING_INTERVAL", monitor_period);

// ---- ACL Manager ----
try
{
aclm = new AclManager(db, zone_id, is_federation_slave(), timer_period);
}
catch (bad_alloc&)
{
throw;
}

rc = aclm->start();

if ( rc != 0 )
{
throw runtime_error("Could not start the ACL Manager");
}

// -----------------------------------------------------------
// Pools
// -----------------------------------------------------------
@ -387,6 +442,7 @@ void Nebula::start(bool bootstrap_only)
string mac_prefix;
string default_image_type;
string default_device_prefix;
string default_cdrom_device_prefix;

time_t expiration_time;
time_t vm_expiration;
@ -465,10 +521,12 @@ void Nebula::start(bool bootstrap_only)
nebula_configuration->get("DEFAULT_IMAGE_TYPE", default_image_type);
nebula_configuration->get("DEFAULT_DEVICE_PREFIX",
default_device_prefix);

nebula_configuration->get("DEFAULT_CDROM_DEVICE_PREFIX",
default_cdrom_device_prefix);
ipool = new ImagePool(db,
default_image_type,
default_device_prefix,
default_cdrom_device_prefix,
img_restricted_attrs,
image_hooks,
remotes_location,
@ -487,41 +545,6 @@ void Nebula::start(bool bootstrap_only)
throw;
}

// -----------------------------------------------------------
// Close stds, we no longer need them
// -----------------------------------------------------------

fd = open("/dev/null", O_RDWR);

dup2(fd,0);
dup2(fd,1);
dup2(fd,2);

close(fd);

fcntl(0,F_SETFD,0); // Keep them open across exec funcs
fcntl(1,F_SETFD,0);
fcntl(2,F_SETFD,0);

// -----------------------------------------------------------
// Block all signals before creating any Nebula thread
// -----------------------------------------------------------

sigfillset(&mask);

pthread_sigmask(SIG_BLOCK, &mask, NULL);

// -----------------------------------------------------------
//Managers
// -----------------------------------------------------------

MadManager::mad_manager_system_init();

time_t timer_period;
time_t monitor_period;

nebula_configuration->get("MANAGER_TIMER", timer_period);
nebula_configuration->get("MONITORING_INTERVAL", monitor_period);

// ---- Virtual Machine Manager ----
try
@ -698,24 +721,6 @@ void Nebula::start(bool bootstrap_only)
}
}

// ---- ACL Manager ----
try
{
aclm = new AclManager(db, zone_id, is_federation_enabled(),
is_federation_slave(), timer_period);
}
catch (bad_alloc&)
{
throw;
}

rc = aclm->start();

if ( rc != 0 )
{
throw runtime_error("Could not start the ACL Manager");
}

// ---- Image Manager ----
try
{
@ -282,6 +282,7 @@ void OpenNebulaTemplate::set_conf_default()
# DATASTORE_CAPACITY_CHECK
# DEFAULT_IMAGE_TYPE
# DEFAULT_DEVICE_PREFIX
# DEFAULT_CDROM_DEVICE_PREFIX
#*******************************************************************************
*/
//DATASTORE_LOCATION
@ -311,6 +312,10 @@ void OpenNebulaTemplate::set_conf_default()

attribute = new SingleAttribute("DEFAULT_DEVICE_PREFIX",value);
conf_default.insert(make_pair(attribute->name(),attribute));

//DEFAULT_CDROM_DEVICE_PREFIX
attribute = new SingleAttribute("DEFAULT_CDROM_DEVICE_PREFIX",value);
conf_default.insert(make_pair(attribute->name(),attribute));
/*
#*******************************************************************************
# Auth Manager Configuration
@ -30,21 +30,21 @@ const char * SystemDB::pc_bootstrap = "CREATE TABLE pool_control "
"(tablename VARCHAR(32) PRIMARY KEY, last_oid BIGINT UNSIGNED)";


// DB versioning table
const char * SystemDB::ver_table = "db_versioning";
const char * SystemDB::ver_names = "oid, version, timestamp, comment";
// DB versioning table, shared (federation) tables
const char * SystemDB::shared_ver_table = "db_versioning";
const char * SystemDB::shared_ver_names = "oid, version, timestamp, comment";

const char * SystemDB::ver_bootstrap = "CREATE TABLE db_versioning "
const char * SystemDB::shared_ver_bootstrap = "CREATE TABLE db_versioning "
"(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, "
"comment VARCHAR(256))";

// DB slave versioning table
const char * SystemDB::slave_ver_table = "slave_db_versioning";
const char * SystemDB::slave_ver_names = "oid, version, timestamp, comment";
// DB versioning table, local tables
const char * SystemDB::local_ver_table = "local_db_versioning";
const char * SystemDB::local_ver_names = "oid, version, timestamp, comment, is_slave";

const char * SystemDB::slave_ver_bootstrap = "CREATE TABLE slave_db_versioning "
const char * SystemDB::local_ver_bootstrap = "CREATE TABLE local_db_versioning "
"(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, "
"comment VARCHAR(256))";
"comment VARCHAR(256), is_slave BOOLEAN)";

// System attributes table
const char * SystemDB::sys_table = "system_attributes";
@ -56,10 +56,34 @@ const char * SystemDB::sys_bootstrap = "CREATE TABLE IF NOT EXISTS"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

int SystemDB::bootstrap()
int SystemDB::shared_bootstrap()
{
int rc;
ostringstream oss;
int rc;
ostringstream oss;

// ---------------------------------------------------------------------
// db versioning, version of OpenNebula.
// ---------------------------------------------------------------------
oss.str(shared_ver_bootstrap);
rc = db->exec(oss);

oss.str("");
oss << "INSERT INTO " << shared_ver_table << " (" << shared_ver_names << ") "
<< "VALUES (0, '" << Nebula::shared_db_version() << "', " << time(0)
<< ", '" << Nebula::version() << " daemon bootstrap')";

rc += db->exec(oss);

return rc;
};

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

int SystemDB::local_bootstrap()
{
int rc;
ostringstream oss;

// ------------------------------------------------------------------------
// pool control, tracks the last ID's assigned to objects
@ -68,15 +92,17 @@ int SystemDB::bootstrap()
rc = db->exec(oss);

// ------------------------------------------------------------------------
// db versioning, version of OpenNebula.
// local db versioning, version of tables that are not replicated in a
// slave OpenNebula.
// ------------------------------------------------------------------------
oss.str(ver_bootstrap);
oss.str(local_ver_bootstrap);
rc += db->exec(oss);

oss.str("");
oss << "INSERT INTO " << ver_table << " (" << ver_names << ") "
<< "VALUES (0, '" << Nebula::db_version() << "', " << time(0)
<< ", '" << Nebula::version() << " daemon bootstrap')";
oss << "INSERT INTO " << local_ver_table << " (" << local_ver_names << ") "
<< "VALUES (0, '" << Nebula::local_db_version() << "', " << time(0)
<< ", '" << Nebula::version() << " daemon bootstrap', "
<< Nebula::instance().is_federation_slave() << ")";

rc += db->exec(oss);

@ -87,43 +113,7 @@ int SystemDB::bootstrap()
rc += db->exec(oss);

return rc;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

int SystemDB::slave_bootstrap()
{
int rc;
ostringstream oss;

// ------------------------------------------------------------------------
// pool control, tracks the last ID's assigned to objects
// ------------------------------------------------------------------------
oss.str(pc_bootstrap);
rc = db->exec(oss);

// ------------------------------------------------------------------------
// db versioning, version of OpenNebula.
// ------------------------------------------------------------------------
oss.str(slave_ver_bootstrap);
rc += db->exec(oss);

oss.str("");
oss << "INSERT INTO " << slave_ver_table << " (" << slave_ver_names << ") "
<< "VALUES (0, '" << Nebula::db_version() << "', " << time(0)
<< ", '" << Nebula::version() << " daemon bootstrap')";

rc += db->exec(oss);

// ------------------------------------------------------------------------
// system
// ------------------------------------------------------------------------
oss.str(sys_bootstrap);
rc += db->exec(oss);

return rc;
}
};

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
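
With the versioning split above, a standalone or master oned records its version in both db_versioning (shared, federation-replicated tables) and the new local_db_versioning table (local tables). As an aside, both can be inspected directly on a SQLite backend; a minimal sketch, assuming the default /var/lib/one/one.db path and the sqlite3 gem, neither of which is part of this commit:

    require 'sqlite3'

    db = SQLite3::Database.new('/var/lib/one/one.db')

    # Print the latest recorded version for the shared and the local table sets
    %w[db_versioning local_db_versioning].each do |table|
        row = db.get_first_row(
            "SELECT version, comment FROM #{table} " \
            "WHERE oid = (SELECT MAX(oid) FROM #{table})")
        puts "#{table}: #{row.inspect}"
    end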
@ -146,98 +136,128 @@ int SystemDB::select_cb(void *_loaded_db_version, int num, char **values,

/* -------------------------------------------------------------------------- */

int SystemDB::check_db_version(bool is_federation_slave)
int SystemDB::check_db_version(const string& table,
const string& version,
string& error)
{
int rc;
ostringstream oss;
ostringstream oss;
string db_version;

string loaded_db_version = "";
error.clear();

// Try to read latest version
set_callback( static_cast<Callbackable::Callback>(&SystemDB::select_cb),
static_cast<void *>(&loaded_db_version) );
set_callback(static_cast<Callbackable::Callback>(&SystemDB::select_cb),
static_cast<void *>(&db_version));

oss << "SELECT version FROM " << ver_table
<< " WHERE oid=(SELECT MAX(oid) FROM " << ver_table << ")";
oss << "SELECT version FROM " << table <<" WHERE oid=(SELECT MAX(oid) FROM "
<< table << ")";

db->exec(oss, this, true);
int rc = db->exec(oss, this, true);

oss.str("");
unset_callback();

if( loaded_db_version == "" )
if( rc != 0 || db_version.empty() )//DB needs bootstrap or replica config.
{
// Table user_pool is present for all OpenNebula versions, and it
// always contains at least the oneadmin user.
oss << "SELECT MAX(oid) FROM user_pool";
rc = db->exec(oss, 0, true);

oss.str("");

if( rc != 0 ) // Database needs bootstrap
{
return -2;
}
return -2;
}

if( Nebula::db_version() != loaded_db_version )
{
if (!is_federation_slave)
{
oss << "Database version mismatch. "
<< "Installed " << Nebula::version() << " uses DB version '"
<< Nebula::db_version() << "', and existing DB version is '"
<< loaded_db_version << "'.";
}
else
{
oss << "Database version mismatch. "
<< "Installed slave " << Nebula::version() << " uses DB version '"
<< Nebula::db_version() << "', and existing master DB version is '"
<< loaded_db_version << "'.";
}
oss.str("");

if(version != db_version)//DB needs upgrade
{
oss << "Database version mismatch ( " << table << "). "
<< "Installed " << Nebula::version() << " needs DB version '"
<< version << "', and existing DB version is '"<< db_version << "'.";

error = oss.str();

NebulaLog::log("ONE",Log::ERROR,oss);
return -1;
}

if (is_federation_slave)
oss << "oned is using version " << version << " for " << table;

NebulaLog::log("ONE", Log::INFO, oss);

return 0;
};

/* -------------------------------------------------------------------------- */

int SystemDB::check_db_version(bool is_slave, bool &local_bs, bool &shared_bs)
{
int rc;
string error;

local_bs = false;
shared_bs = false;

/* ---------------------------------------------------------------------- */
/* Check DB version for local tables */
/* ---------------------------------------------------------------------- */

rc = check_db_version(local_ver_table, Nebula::local_db_version(), error);

switch(rc)
{
string loaded_db_version = "";
case 0:// All ok continue
break;

// Try to read latest version from the slave db version table
set_callback( static_cast<Callbackable::Callback>(&SystemDB::select_cb),
static_cast<void *>(&loaded_db_version) );

oss << "SELECT version FROM " << slave_ver_table
<< " WHERE oid=(SELECT MAX(oid) FROM " << slave_ver_table << ")";

db->exec(oss, this, true);

oss.str("");
unset_callback();

if( loaded_db_version == "" )
{
return -3;
}

if( Nebula::db_version() != loaded_db_version )
{
oss << "Database version mismatch. "
<< "Installed slave " << Nebula::version() << " uses DB version '"
<< Nebula::db_version() << "', and existing slave DB version is '"
<< loaded_db_version << "'.";

NebulaLog::log("ONE",Log::ERROR,oss);
case -1:// Version missmatch (same for master/slave/standalone)
NebulaLog::log("ONE", Log::ERROR, error);
NebulaLog::log("ONE", Log::ERROR, "Use onedb to upgrade DB.");
return -1;
}

return 0;
case -2: //Cannot access DB table or empty, bootstrap
local_bs = true;
break;

default:
break;
}

/* ---------------------------------------------------------------------- */
/* Check DB version for shared (federation) tables */
/* ---------------------------------------------------------------------- */

rc = check_db_version(shared_ver_table, Nebula::shared_db_version(), error);

switch(rc)
{
case 0:// All ok continue
break;

case -1:// Version missmatch
NebulaLog::log("ONE", Log::ERROR, error);

if (is_slave)
{
NebulaLog::log("ONE", Log::ERROR,
"Cannot join federation, oned master needs upgrade.");
}
else
{
NebulaLog::log("ONE", Log::ERROR, "Use onedb to upgrade DB.");
}

return -1;

case -2: //Cannot access DB table or empty, bootstrap (only master/standalone)
if (is_slave)
{
NebulaLog::log("ONE", Log::ERROR, "Cannot access shared DB"
" tables. Check DB replica configuration.");

return -1;
}

shared_bs = true;
break;

default:
break;
}

return 0;
}
};

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
@ -32,7 +32,7 @@ public class OneSystem
private static final String GROUP_QUOTA_INFO = "groupquota.info";
private static final String GROUP_QUOTA_UPDATE = "groupquota.update";

public static final String VERSION = "4.5.0";
public static final String VERSION = "4.5.80";

public OneSystem(Client client)
{
@ -56,5 +56,5 @@ require 'opennebula/system'
module OpenNebula

# OpenNebula version
VERSION = '4.5.0'
VERSION = '4.5.80'
end
@ -101,6 +101,8 @@ module OpenNebula
# @param [Hash] options
# @option params [Integer] :timeout connection timeout in seconds,
# defaults to 30
# @option params [String] :http_proxy HTTP proxy string used for
# connecting to the endpoint; defaults to no proxy
#
# @return [OpenNebula::Client]
def initialize(secret=nil, endpoint=nil, options={})
@ -130,7 +132,10 @@ module OpenNebula
timeout=nil
timeout=options[:timeout] if options[:timeout]

@server = XMLRPC::Client.new2(@one_endpoint, nil, timeout)
http_proxy=nil
http_proxy=options[:http_proxy] if options[:http_proxy]

@server = XMLRPC::Client.new2(@one_endpoint, http_proxy, timeout)

if defined?(OxStreamParser)
@server.set_parser(OxStreamParser.new)
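
The hunk above threads a new :http_proxy option from the Client constructor into XMLRPC::Client.new2. A minimal usage sketch follows; the credentials, endpoint and proxy address are placeholders, not values taken from this commit:

    require 'opennebula'

    # Hypothetical values: replace with a real user:password secret, the oned
    # XML-RPC endpoint and the proxy URL of your site.
    client = OpenNebula::Client.new(
        'oneadmin:opennebula',
        'http://frontend.example.com:2633/RPC2',
        :timeout    => 30,
        :http_proxy => 'http://proxy.example.com:3128')

    pool = OpenNebula::VirtualMachinePool.new(client)
    rc   = pool.info

    puts pool.to_xml unless OpenNebula.is_error?(rc)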
@ -26,6 +26,7 @@ module OpenNebula
GROUP_METHODS = {
:info => "group.info",
:allocate => "group.allocate",
:update => "group.update",
:delete => "group.delete",
:quota => "group.quota",
:add_provider => "group.addprovider",
@ -36,7 +37,7 @@ module OpenNebula
SELF = -1

# Default resource ACL's for group users (create)
GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE"
GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE+DOCUMENT"
ALL_CLUSTERS_IN_ZONE = 10

# Creates a Group description with just its identifier
@ -222,6 +223,18 @@ module OpenNebula
super(GROUP_METHODS[:allocate], groupname)
end

# Replaces the template contents
#
# @param new_template [String] New template contents
# @param append [true, false] True to append new attributes instead of
# replace the whole template
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def update(new_template=nil, append=false)
super(GROUP_METHODS[:update], new_template, append ? 1 : 0)
end

# Deletes the Group
def delete()
super(GROUP_METHODS[:delete])
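
The new Group#update wrapper follows the same convention as the other OCA resources: passing append as true merges the given attributes into the existing template instead of replacing it. A short sketch, assuming a client built as in the previous example and a hypothetical group with ID 100:

    group = OpenNebula::Group.new_with_id(100, client)

    rc = group.info
    raise rc.message if OpenNebula.is_error?(rc)

    # Append a DESCRIPTION attribute instead of replacing the whole template
    rc = group.update('DESCRIPTION = "Web development team"', true)
    raise rc.message if OpenNebula.is_error?(rc)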
@ -110,6 +110,7 @@ module OpenNebula
include ParsePoolBase

alias :text :characters
alias :cdata :characters
end
end
elsif NOKOGIRI
@ -1,5 +1,5 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -1,5 +1,5 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -1,600 +0,0 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'set'
|
||||
require "rexml/document"
|
||||
include REXML
|
||||
|
||||
class String
|
||||
def red
|
||||
colorize(31)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def colorize(color_code)
|
||||
"\e[#{color_code}m#{self}\e[0m"
|
||||
end
|
||||
end
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"3.9.80"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 3.9.80"
|
||||
end
|
||||
|
||||
def up
|
||||
|
||||
########################################################################
|
||||
# Add Cloning Image ID collection to Images
|
||||
########################################################################
|
||||
|
||||
counters = {}
|
||||
counters[:image] = {}
|
||||
|
||||
# Init image counters
|
||||
@db.fetch("SELECT oid,body FROM image_pool") do |row|
|
||||
if counters[:image][row[:oid]].nil?
|
||||
counters[:image][row[:oid]] = {
|
||||
:clones => Set.new
|
||||
}
|
||||
end
|
||||
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("CLONING_ID") do |e|
|
||||
img_id = e.text.to_i
|
||||
|
||||
if counters[:image][img_id].nil?
|
||||
counters[:image][img_id] = {
|
||||
:clones => Set.new
|
||||
}
|
||||
end
|
||||
|
||||
counters[:image][img_id][:clones].add(row[:oid])
|
||||
end
|
||||
end
|
||||
|
||||
########################################################################
|
||||
# Image
|
||||
#
|
||||
# IMAGE/CLONING_OPS
|
||||
# IMAGE/CLONES/ID
|
||||
########################################################################
|
||||
|
||||
@db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );"
|
||||
|
||||
@db[:image_pool].each do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
oid = row[:oid]
|
||||
|
||||
n_cloning_ops = counters[:image][oid][:clones].size
|
||||
|
||||
# Rewrite number of clones
|
||||
doc.root.each_element("CLONING_OPS") { |e|
|
||||
if e.text != n_cloning_ops.to_s
|
||||
warn("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}")
|
||||
e.text = n_cloning_ops
|
||||
end
|
||||
}
|
||||
|
||||
# re-do list of Images cloning this one
|
||||
clones_new_elem = doc.root.add_element("CLONES")
|
||||
|
||||
counters[:image][oid][:clones].each do |id|
|
||||
clones_new_elem.add_element("ID").text = id.to_s
|
||||
end
|
||||
|
||||
row[:body] = doc.to_s
|
||||
|
||||
# commit
|
||||
@db[:image_pool_new].insert(row)
|
||||
end
|
||||
|
||||
# Rename table
|
||||
@db.run("DROP TABLE image_pool")
|
||||
@db.run("ALTER TABLE image_pool_new RENAME TO image_pool")
|
||||
|
||||
########################################################################
|
||||
# Feature #1565: New cid column in host, ds and vnet tables
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
|
||||
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_host_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
cluster_id = doc.root.get_text('CLUSTER_ID').to_s
|
||||
|
||||
@db[:host_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:state => row[:state],
|
||||
:last_mon_time => row[:last_mon_time],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => cluster_id)
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_host_pool;"
|
||||
|
||||
########################################################################
|
||||
# Feature #1565: New cid column
|
||||
# Feature #471: IPv6 addresses
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE network_pool RENAME TO old_network_pool;"
|
||||
@db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_network_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
cluster_id = doc.root.get_text('CLUSTER_ID').to_s
|
||||
|
||||
doc.root.add_element("GLOBAL_PREFIX")
|
||||
doc.root.add_element("SITE_PREFIX")
|
||||
|
||||
@db[:network_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => cluster_id)
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_network_pool;"
|
||||
|
||||
########################################################################
|
||||
# Feature #1617
|
||||
# New datastore, 2 "files"
|
||||
# DATASTORE/SYSTEM is now DATASTORE/TYPE
|
||||
#
|
||||
# Feature #1565: New cid column in host, ds and vnet tables
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;"
|
||||
@db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_datastore_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
type = "0" # IMAGE_DS
|
||||
|
||||
system_elem = doc.root.delete_element("SYSTEM")
|
||||
|
||||
if ( !system_elem.nil? && system_elem.text == "1" )
|
||||
type = "1" # SYSTEM_DS
|
||||
end
|
||||
|
||||
doc.root.add_element("TYPE").text = type
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.delete_element("SYSTEM")
|
||||
e.add_element("TYPE").text = type == "0" ? "IMAGE_DS" : "SYSTEM_DS"
|
||||
end
|
||||
|
||||
cluster_id = doc.root.get_text('CLUSTER_ID').to_s
|
||||
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => cluster_id)
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_datastore_pool;"
|
||||
|
||||
|
||||
user_0_name = "oneadmin"
|
||||
|
||||
@db.fetch("SELECT name FROM user_pool WHERE oid=0") do |row|
|
||||
user_0_name = row[:name]
|
||||
end
|
||||
|
||||
group_0_name = "oneadmin"
|
||||
|
||||
@db.fetch("SELECT name FROM group_pool WHERE oid=0") do |row|
|
||||
group_0_name = row[:name]
|
||||
end
|
||||
|
||||
base_path = "/var/lib/one/datastores/2"
|
||||
|
||||
@db.fetch("SELECT body FROM datastore_pool WHERE oid=0") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("BASE_PATH") do |e|
|
||||
base_path = e.text
|
||||
base_path[-1] = "2"
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "INSERT INTO datastore_pool VALUES(2,'files','<DATASTORE><ID>2</ID><UID>0</UID><GID>0</GID><UNAME>#{user_0_name}</UNAME><GNAME>#{group_0_name}</GNAME><NAME>files</NAME><PERMISSIONS><OWNER_U>1</OWNER_U><OWNER_M>1</OWNER_M><OWNER_A>0</OWNER_A><GROUP_U>1</GROUP_U><GROUP_M>0</GROUP_M><GROUP_A>0</GROUP_A><OTHER_U>1</OTHER_U><OTHER_M>0</OTHER_M><OTHER_A>0</OTHER_A></PERMISSIONS><DS_MAD>fs</DS_MAD><TM_MAD>ssh</TM_MAD><BASE_PATH>#{base_path}</BASE_PATH><TYPE>2</TYPE><DISK_TYPE>0</DISK_TYPE><CLUSTER_ID>-1</CLUSTER_ID><CLUSTER></CLUSTER><IMAGES></IMAGES><TEMPLATE><DS_MAD><![CDATA[fs]]></DS_MAD><TM_MAD><![CDATA[ssh]]></TM_MAD><TYPE><![CDATA[FILE_DS]]></TYPE></TEMPLATE></DATASTORE>',0,0,1,1,1,-1);"
|
||||
|
||||
|
||||
########################################################################
|
||||
# Feature #1611: Default quotas
|
||||
########################################################################
|
||||
|
||||
@db.run("CREATE TABLE IF NOT EXISTS system_attributes (name VARCHAR(128) PRIMARY KEY, body MEDIUMTEXT)")
|
||||
@db.run("INSERT INTO system_attributes VALUES('DEFAULT_GROUP_QUOTAS','<DEFAULT_GROUP_QUOTAS><DATASTORE_QUOTA></DATASTORE_QUOTA><NETWORK_QUOTA></NETWORK_QUOTA><VM_QUOTA></VM_QUOTA><IMAGE_QUOTA></IMAGE_QUOTA></DEFAULT_GROUP_QUOTAS>');")
|
||||
@db.run("INSERT INTO system_attributes VALUES('DEFAULT_USER_QUOTAS','<DEFAULT_USER_QUOTAS><DATASTORE_QUOTA></DATASTORE_QUOTA><NETWORK_QUOTA></NETWORK_QUOTA><VM_QUOTA></VM_QUOTA><IMAGE_QUOTA></IMAGE_QUOTA></DEFAULT_USER_QUOTAS>');")
|
||||
|
||||
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
# oneadmin does not have quotas
|
||||
@db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row|
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
set_default_quotas(doc)
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_user_pool;"
|
||||
|
||||
|
||||
@db.run "ALTER TABLE group_pool RENAME TO old_group_pool;"
|
||||
@db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
|
||||
# oneadmin group does not have quotas
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row|
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
set_default_quotas(doc)
|
||||
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_group_pool;"
|
||||
|
||||
########################################################################
|
||||
# Bug #1694: SYSTEM_DS is now set with the method adddatastore
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;"
|
||||
@db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_cluster_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
system_ds = 0
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
elem = e.delete_element("SYSTEM_DS")
|
||||
|
||||
if !elem.nil?
|
||||
system_ds = elem.text.to_i
|
||||
end
|
||||
end
|
||||
|
||||
if system_ds != 0
|
||||
updated_body = nil
|
||||
|
||||
@db.fetch("SELECT body FROM datastore_pool WHERE oid=#{system_ds}") do |ds_row|
|
||||
ds_doc = Document.new(ds_row[:body])
|
||||
|
||||
type = "0" # IMAGE_DS
|
||||
|
||||
ds_doc.root.each_element("TYPE") do |e|
|
||||
type = e.text
|
||||
end
|
||||
|
||||
if type != "1"
|
||||
puts " > Cluster #{row[:oid]} has the "<<
|
||||
"System Datastore set to Datastore #{system_ds}, "<<
|
||||
"but its type is not SYSTEM_DS. The System Datastore "<<
|
||||
"for this Cluster will be set to 0"
|
||||
|
||||
system_ds = 0
|
||||
else
|
||||
cluster_id = "-1"
|
||||
|
||||
ds_doc.root.each_element("CLUSTER_ID") do |e|
|
||||
cluster_id = e.text
|
||||
end
|
||||
|
||||
if row[:oid] != cluster_id.to_i
|
||||
puts " > Cluster #{row[:oid]} has the "<<
|
||||
"System Datastore set to Datastore #{system_ds}, "<<
|
||||
"but it is not part of the Cluster. It will be added now."
|
||||
|
||||
ds_doc.root.each_element("CLUSTER_ID") do |e|
|
||||
e.text = row[:oid]
|
||||
end
|
||||
|
||||
ds_doc.root.each_element("CLUSTER") do |e|
|
||||
e.text = row[:name]
|
||||
end
|
||||
|
||||
updated_body = ds_doc.root.to_s
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if !updated_body.nil?
|
||||
@db[:datastore_pool].where(:oid => system_ds).update(
|
||||
:body => updated_body)
|
||||
end
|
||||
end
|
||||
|
||||
doc.root.add_element("SYSTEM_DS").text = system_ds.to_s
|
||||
|
||||
@db[:cluster_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_cluster_pool;"
|
||||
|
||||
|
||||
########################################################################
|
||||
# Feature #1556: New elem USER_TEMPLATE
|
||||
#
|
||||
# Feature #1483: Move scheduling attributes
|
||||
# /VM/TEMPLATE/REQUIREMENTS -> USER_TEMPLATE/SCHED_REQUIREMENTS
|
||||
# /VM/TEMPLATE/RANK -> USER_TEMPLATE/SCHED_RANK
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
|
||||
doc = Document.new(row[:body])
|
||||
user_template = doc.root.add_element("USER_TEMPLATE")
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
elem = e.delete_element("REQUIREMENTS")
|
||||
|
||||
if !elem.nil?
|
||||
user_template.add_element("SCHED_REQUIREMENTS").text = elem.text
|
||||
end
|
||||
|
||||
elem = e.delete_element("RANK")
|
||||
|
||||
if !elem.nil?
|
||||
user_template.add_element("SCHED_RANK").text = elem.text
|
||||
end
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
|
||||
########################################################################
|
||||
# Feature #1483: Move scheduling attributes
|
||||
# /VMTEMPLATE/TEMPLATE/REQUIREMENTS -> /VMTEMPLATE/TEMPLATE/SCHED_REQUIREMENTS
|
||||
# /VMTEMPLATE/TEMPLATE/RANK -> /VMTEMPLATE/TEMPLATE/SCHED_RANK
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE template_pool RENAME TO old_template_pool;"
|
||||
@db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.fetch("SELECT * FROM old_template_pool") do |row|
|
||||
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
template = nil
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
template = e
|
||||
end
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
elem = e.delete_element("REQUIREMENTS")
|
||||
|
||||
if !elem.nil?
|
||||
template.add_element("SCHED_REQUIREMENTS").text = elem.text
|
||||
end
|
||||
|
||||
elem = e.delete_element("RANK")
|
||||
|
||||
if !elem.nil?
|
||||
template.add_element("SCHED_RANK").text = elem.text
|
||||
end
|
||||
end
|
||||
|
||||
@db[:template_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_template_pool;"
|
||||
|
||||
########################################################################
|
||||
# Feature #1691 Add new attribute NIC/NIC_ID
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
if ( row[:state] != 6 ) # DONE
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
nic_id = 0
|
||||
|
||||
doc.root.each_element("TEMPLATE/NIC") { |e|
|
||||
e.delete_element("NIC_ID")
|
||||
e.add_element("NIC_ID").text = (nic_id).to_s
|
||||
|
||||
nic_id += 1
|
||||
}
|
||||
|
||||
row[:body] = doc.root.to_s
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
########################################################################
|
||||
#
|
||||
# Banner for the new /var/lib/one/vms directory
|
||||
#
|
||||
########################################################################
|
||||
|
||||
puts
|
||||
puts "ATTENTION: manual intervention required".red
|
||||
puts <<-END.gsub(/^ {8}/, '')
|
||||
Virtual Machine deployment files have been moved from /var/lib/one to
|
||||
/var/lib/one/vms. You need to move these files manually:
|
||||
|
||||
$ mv /var/lib/one/[0-9]* /var/lib/one/vms
|
||||
|
||||
END
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
|
||||
def set_default_quotas(doc)
|
||||
|
||||
# VM quotas
|
||||
|
||||
doc.root.each_element("VM_QUOTA/VM/CPU") do |e|
|
||||
e.text = "-1" if e.text.to_f == 0
|
||||
end
|
||||
|
||||
doc.root.each_element("VM_QUOTA/VM/MEMORY") do |e|
|
||||
e.text = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
doc.root.each_element("VM_QUOTA/VM/VMS") do |e|
|
||||
e.text = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
# VNet quotas
|
||||
|
||||
doc.root.each_element("NETWORK_QUOTA/NETWORK/LEASES") do |e|
|
||||
e.text = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
# Image quotas
|
||||
|
||||
doc.root.each_element("IMAGE_QUOTA/IMAGE/RVMS") do |e|
|
||||
e.text = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
# Datastore quotas
|
||||
|
||||
doc.root.each_element("DATASTORE_QUOTA/DATASTORE/IMAGES") do |e|
|
||||
e.text = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
doc.root.each_element("DATASTORE_QUOTA/DATASTORE/SIZE") do |e|
|
||||
e.text = "-1" if e.text.to_i == 0
|
||||
end
|
||||
end
|
||||
end
|
@ -1,401 +0,0 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'rexml/document'
|
||||
|
||||
TM_MAD_CONF = {
|
||||
"dummy" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"lvm" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SELF"
|
||||
},
|
||||
"shared" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"shared_lvm" => {
|
||||
:ln_target => "SYSTEM",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"qcow2" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"ssh" => {
|
||||
:ln_target => "SYSTEM",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"vmfs" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"iscsi" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SELF"
|
||||
},
|
||||
"ceph" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SELF"
|
||||
}
|
||||
}
|
||||
|
||||
class String
|
||||
def red
|
||||
colorize(31)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def colorize(color_code)
|
||||
"\e[#{color_code}m#{self}\e[0m"
|
||||
end
|
||||
end
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"4.3.80"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 4.3.80"
|
||||
end
|
||||
|
||||
def up
|
||||
|
||||
########################################################################
|
||||
# Feature #1742 & #1612
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_user_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
doc.root.add_element("GROUPS").add_element("ID").text = row[:gid].to_s
|
||||
|
||||
# oneadmin does not have quotas
|
||||
if row[:oid] != 0
|
||||
redo_vm_quotas(doc, "uid=#{row[:oid]}")
|
||||
end
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_user_pool;"
|
||||
|
||||
########################################################################
|
||||
# Feature #1612
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE group_pool RENAME TO old_group_pool;"
|
||||
@db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
# oneadmin group does not have quotas
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row|
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
redo_vm_quotas(doc, "gid=#{row[:oid]}")
|
||||
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_group_pool;"
|
||||
|
||||
########################################################################
|
||||
# Bug #2330 & Feature #1678
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;"
|
||||
@db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
#tm_mads = {}
|
||||
|
||||
@db.fetch("SELECT * FROM old_datastore_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("TEMPLATE/HOST") do |e|
|
||||
e.name = "BRIDGE_LIST"
|
||||
end
|
||||
|
||||
tm_mad = ""
|
||||
doc.root.each_element("TM_MAD"){ |e| tm_mad = e.text }
|
||||
|
||||
type = 0
|
||||
doc.root.each_element("TYPE"){ |e| type = e.text.to_i }
|
||||
|
||||
if (type == 1) # System DS
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.add_element("SHARED").text =
|
||||
(tm_mad == "ssh" ? "NO" : "YES")
|
||||
end
|
||||
else
|
||||
#tm_mads[row[:oid].to_i] = tm_mad
|
||||
|
||||
conf = TM_MAD_CONF[tm_mad]
|
||||
|
||||
if conf.nil?
|
||||
puts
|
||||
puts "ATTENTION: manual intervention required".red
|
||||
puts <<-END
|
||||
The Datastore ##{row[:oid]} #{row[:name]} is using the
|
||||
custom TM MAD '#{tm_mad}'. You will need to define new
|
||||
configuration parameters in oned.conf for this driver, see
|
||||
http://opennebula.org/documentation:rel4.4:upgrade
|
||||
END
|
||||
else
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.add_element("LN_TARGET").text = conf[:ln_target]
|
||||
e.add_element("CLONE_TARGET").text = conf[:clone_target]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => row[:cid])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_datastore_pool;"
|
||||
|
||||
########################################################################
|
||||
# Feature #2392
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("HISTORY_RECORDS/HISTORY") do |e|
|
||||
update_history(e)
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
@db.run "ALTER TABLE history RENAME TO old_history;"
|
||||
@db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_history") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("/HISTORY") do |e|
|
||||
update_history(e)
|
||||
end
|
||||
|
||||
@db[:history].insert(
|
||||
:vid => row[:vid],
|
||||
:seq => row[:seq],
|
||||
:body => doc.root.to_s,
|
||||
:stime => row[:stime],
|
||||
:etime => row[:etime])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_history;"
|
||||
|
||||
########################################################################
|
||||
# Feature #1678
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
|
||||
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_host_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("HOST_SHARE") do |e|
|
||||
e.add_element("DATASTORES")
|
||||
end
|
||||
|
||||
@db[:host_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:state => row[:state],
|
||||
:last_mon_time => row[:last_mon_time],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => row[:cid])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_host_pool;"
|
||||
|
||||
# TODO:
|
||||
# For Feature #1678, VMs have new disk elements:
|
||||
# VM/DISK/CLONE_TARGET
|
||||
# VM/DISK/LN_TARGET
|
||||
# VM/DISK/SIZE
|
||||
#
|
||||
# These elements are only used to schedule new deployments, so if we
|
||||
# don't add them it will only affect automatic deployment of VMs
|
||||
# recreated (onevm delete --recreate). Manual deployments will still
|
||||
# work without problems.
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
def redo_vm_quotas(doc, where_filter)
|
||||
cpu_limit = "-1"
|
||||
mem_limit = "-1"
|
||||
vms_limit = "-1"
|
||||
vol_limit = "-1"
|
||||
|
||||
doc.root.each_element("VM_QUOTA/VM/CPU") { |e|
|
||||
cpu_limit = e.text
|
||||
}
|
||||
|
||||
doc.root.each_element("VM_QUOTA/VM/MEMORY") { |e|
|
||||
mem_limit = e.text
|
||||
}
|
||||
|
||||
doc.root.each_element("VM_QUOTA/VM/VMS") { |e|
|
||||
vms_limit = e.text
|
||||
}
|
||||
|
||||
doc.root.delete_element("VM_QUOTA")
|
||||
vm_quota = doc.root.add_element("VM_QUOTA")
|
||||
|
||||
# VM quotas
|
||||
cpu_used = 0
|
||||
mem_used = 0
|
||||
vms_used = 0
|
||||
vol_used = 0
|
||||
|
||||
@db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row|
|
||||
vmdoc = REXML::Document.new(vm_row[:body])
|
||||
|
||||
# VM quotas
|
||||
vmdoc.root.each_element("TEMPLATE/CPU") { |e|
|
||||
cpu_used += e.text.to_f
|
||||
}
|
||||
|
||||
vmdoc.root.each_element("TEMPLATE/MEMORY") { |e|
|
||||
mem_used += e.text.to_i
|
||||
}
|
||||
|
||||
vmdoc.root.each_element("TEMPLATE/DISK") { |e|
|
||||
type = ""
|
||||
|
||||
e.each_element("TYPE") { |t_elem|
|
||||
type = t_elem.text.upcase
|
||||
}
|
||||
|
||||
if ( type == "SWAP" || type == "FS")
|
||||
e.each_element("SIZE") { |size_elem|
|
||||
vol_used += size_elem.text.to_i
|
||||
}
|
||||
end
|
||||
}
|
||||
|
||||
vms_used += 1
|
||||
end
|
||||
|
||||
if (vms_used != 0 ||
|
||||
cpu_limit != "-1" || mem_limit != "-1" || vms_limit != "-1" || vol_limit != "-1" )
|
||||
|
||||
# VM quotas
|
||||
vm_elem = vm_quota.add_element("VM")
|
||||
|
||||
vm_elem.add_element("CPU").text = cpu_limit
|
||||
vm_elem.add_element("CPU_USED").text = sprintf('%.2f', cpu_used)
|
||||
|
||||
vm_elem.add_element("MEMORY").text = mem_limit
|
||||
vm_elem.add_element("MEMORY_USED").text = mem_used.to_s
|
||||
|
||||
vm_elem.add_element("VMS").text = vms_limit
|
||||
vm_elem.add_element("VMS_USED").text = vms_used.to_s
|
||||
|
||||
vm_elem.add_element("VOLATILE_SIZE").text = vol_limit
|
||||
vm_elem.add_element("VOLATILE_SIZE_USED").text = vol_used.to_s
|
||||
end
|
||||
end
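
# ----------------------------------------------------------------------
# Usage sketch (hypothetical, for illustration): redo_vm_quotas expects
# the quota owner's parsed XML document plus a SQL filter that selects
# that owner's VMs. The actual call sites are outside this hunk; a call
# for a given user would look roughly like this:
#
#   doc = REXML::Document.new(user_row[:body])
#   redo_vm_quotas(doc, "uid=#{user_row[:oid]}")
# ----------------------------------------------------------------------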
|
||||
|
||||
def update_history(history_elem)
    hid = nil

    history_elem.each_element("HID") do |e|
        hid = e.text
    end

    new_elem = history_elem.add_element("CID")
    new_elem.text = "-1" # Cluster None

    if hid.nil?
        return
    end

    @db.fetch("SELECT cid FROM host_pool WHERE oid = #{hid}") do |row|
        new_elem.text = row[:cid].to_s
    end
end
|
||||
|
||||
end
|
src/onedb/fsck.rb: 1363 lines changed (file diff suppressed because it is too large)
@ -14,8 +14,6 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
ONE_LOCATION = ENV["ONE_LOCATION"]
|
||||
|
||||
if !ONE_LOCATION
|
||||
LOG_LOCATION = "/var/log/one"
|
||||
else
|
||||
@ -31,9 +29,32 @@ include OpenNebula
|
||||
|
||||
module OneDBImportSlave
|
||||
VERSION = "4.5.0"
|
||||
LOCAL_VERSION = "4.5.0"
|
||||
|
||||
def db_version
|
||||
VERSION
|
||||
def check_db_version(master_db_version, slave_db_version)
    if ( master_db_version[:version] != VERSION ||
         master_db_version[:local_version] != LOCAL_VERSION )

        raise <<-EOT
Version mismatch: import slave file is for version
Shared: #{VERSION}, Local: #{LOCAL_VERSION}

Current master database is version
Shared: #{master_db_version[:version]}, Local: #{master_db_version[:local_version]}
EOT
    elsif ( slave_db_version[:version] != VERSION ||
            slave_db_version[:local_version] != LOCAL_VERSION )

        raise <<-EOT
Version mismatch: import slave file is for version
Shared: #{VERSION}, Local: #{LOCAL_VERSION}

Current slave database is version
Shared: #{slave_db_version[:version]}, Local: #{slave_db_version[:local_version]}
EOT
    elsif master_db_version[:is_slave]
        raise "Master database is an OpenNebula federation slave"
    end
end
|
||||
|
||||
def one_version
|
||||
@ -364,6 +385,28 @@ EOT
|
||||
end
|
||||
end
|
||||
|
||||
slave_template = slave_doc.root.at_xpath("TEMPLATE")
|
||||
master_template = master_doc.root.at_xpath("TEMPLATE")
|
||||
|
||||
# Avoid duplicated template attributes, removing
|
||||
# them from the slave template
|
||||
master_template.children.each do |e|
|
||||
if slave_template.at_xpath(e.name)
|
||||
slave_template.at_xpath(e.name).remove
|
||||
end
|
||||
end
|
||||
|
||||
# Add slave template attributes to master template
|
||||
master_template << slave_template.children
|
||||
|
||||
# Merge resource providers
|
||||
slave_doc.root.xpath("RESOURCE_PROVIDER").each do |elem|
|
||||
# Zone ID must be 0, will be changed to the target ID
|
||||
elem.at_xpath("ZONE_ID").content = zone_id
|
||||
|
||||
master_doc.root << elem
|
||||
end
|
||||
|
||||
@db[:group_pool].where(:oid => new_group[:oid]).update(
|
||||
:body => master_doc.root.to_s)
|
||||
else
|
||||
@ -382,6 +425,12 @@ EOT
|
||||
|
||||
slave_doc.root.add_child(new_elem)
|
||||
|
||||
# Update resource providers
|
||||
slave_doc.root.xpath("RESOURCE_PROVIDER").each do |elem|
|
||||
# Zone ID must be 0, will be changed to the target ID
|
||||
elem.at_xpath("ZONE_ID").content = zone_id
|
||||
end
|
||||
|
||||
@db[:group_pool].insert(
|
||||
:oid => new_group[:oid],
|
||||
:name => new_group[:name],
|
||||
@ -479,6 +528,19 @@ EOT
|
||||
((row[:resource] & 0xFFFFFFFF00000000) | groups[gid][:oid])
|
||||
end
|
||||
|
||||
elsif ( (row[:resource] & Acl::RESOURCES["GROUP"]) == Acl::RESOURCES["GROUP"] &&
|
||||
(row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] )
|
||||
|
||||
gid = (row[:resource] & 0xFFFFFFFF)
|
||||
|
||||
if (groups[gid].nil?)
|
||||
insert = false
|
||||
error_str = "Group ##{gid} does not exist"
|
||||
else
|
||||
new_resource =
|
||||
((row[:resource] & 0xFFFFFFFF00000000) | groups[gid][:oid])
|
||||
end
|
||||
|
||||
elsif ( (row[:resource] & Acl::RESOURCES["USER"]) == Acl::RESOURCES["USER"] &&
|
||||
(row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] )
|
||||
|
||||
@ -494,52 +556,63 @@ EOT
|
||||
|
||||
end
|
||||
|
||||
if ( (row[:resource] & Acl::RESOURCES["ZONE"]) == Acl::RESOURCES["ZONE"] &&
|
||||
(row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] )
|
||||
|
||||
zid = (row[:resource] & 0xFFFFFFFF)
|
||||
|
||||
if (zid != 0)
|
||||
insert = false
|
||||
error_str = "Zone ##{zid} is unknown for the slave"
|
||||
else
|
||||
new_resource = (Acl::USERS["UID"] | zone_id)
|
||||
end
|
||||
end
|
||||
|
||||
if ( (row[:zone] & Acl::USERS["UID"]) == Acl::USERS["UID"] )
|
||||
zid = (row[:zone] & 0xFFFFFFFF)
|
||||
|
||||
if (zid != 0)
|
||||
insert = false
|
||||
error_str = "Zone ##{zid} is unknown for the slave"
|
||||
else
|
||||
new_zone = (Acl::USERS["UID"] | zone_id)
|
||||
end
|
||||
end
|
||||
|
||||
if (!insert)
|
||||
# Avoid duplicated ACL rules
|
||||
@db.fetch("SELECT oid FROM acl WHERE "<<
|
||||
"user = #{new_user} AND resource = #{new_resource} "<<
|
||||
"AND rights = #{row[:rights]} AND "<<
|
||||
"zone = #{new_zone}") do |acl_row|
|
||||
|
||||
insert = false
|
||||
error_str = "the same Rule exists with ID ##{acl_row[:oid]}"
|
||||
end
|
||||
|
||||
|
||||
if (insert)
|
||||
last_acl_oid += 1
|
||||
|
||||
log("Slave DB ACL Rule ##{row[:oid]} imported with ID ##{last_acl_oid}")
|
||||
|
||||
@db[:acl].insert(
|
||||
:oid => last_acl_oid,
|
||||
:user => new_user,
|
||||
:resource => new_resource,
|
||||
:rights => row[:rights],
|
||||
:zone => new_zone)
|
||||
else
|
||||
log("Slave DB ACL Rule ##{row[:oid]} will not be "<<
|
||||
"imported to the master DB, " << error_str)
|
||||
else
|
||||
# Avoid duplicated ACL rules
|
||||
@db.fetch("SELECT oid FROM acl WHERE "<<
|
||||
"user = #{new_user} AND resource = #{new_resource} "<<
|
||||
"AND rights = #{row[:rights]} AND "<<
|
||||
"zone = #{row[:zone]}") do |acl_row|
|
||||
|
||||
insert = false
|
||||
end
|
||||
|
||||
if (insert)
|
||||
last_acl_oid += 1
|
||||
|
||||
log("New ACL Rule imported with ID ##{last_acl_oid}")
|
||||
|
||||
@db[:acl].insert(
|
||||
:oid => last_acl_oid,
|
||||
:user => new_user,
|
||||
:resource => new_resource,
|
||||
:rights => row[:rights],
|
||||
:zone => new_zone)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
########################################################################
|
||||
# Init slave_db_versioning table
|
||||
# Cleanup shared tables from slave DB
|
||||
########################################################################
|
||||
|
||||
@slave_db.run "CREATE TABLE slave_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256));"
|
||||
@slave_db.run "INSERT INTO slave_db_versioning (oid, version, timestamp, comment) VALUES (0, '#{VERSION}', #{Time.now.to_i}, 'onedb import tool');"
|
||||
|
||||
@slave_db.run "DROP TABLE old_document_pool;"
|
||||
@slave_db.run "DROP TABLE old_image_pool;"
|
||||
@slave_db.run "DROP TABLE old_network_pool;"
|
||||
|
@ -16,6 +16,9 @@
|
||||
|
||||
require 'onedb_backend'
|
||||
|
||||
# If set to true, extra verbose time log will be printed for each migrator
|
||||
LOG_TIME = false
|
||||
|
||||
class OneDB
|
||||
def initialize(ops)
|
||||
if ops[:backend] == :sqlite
|
||||
@ -91,16 +94,32 @@ class OneDB
|
||||
end
|
||||
|
||||
def version(ops)
|
||||
version, timestamp, comment = @backend.read_db_version
|
||||
ret = @backend.read_db_version
|
||||
|
||||
if(ops[:verbose])
|
||||
puts "Version: #{version}"
|
||||
puts "Shared tables version: #{ret[:version]}"
|
||||
|
||||
time = version == "2.0" ? Time.now : Time.at(timestamp)
|
||||
time = ret[:version] == "2.0" ? Time.now : Time.at(ret[:timestamp])
|
||||
puts "Timestamp: #{time.strftime("%m/%d %H:%M:%S")}"
|
||||
puts "Comment: #{comment}"
|
||||
puts "Comment: #{ret[:comment]}"
|
||||
|
||||
if ret[:local_version]
|
||||
puts
|
||||
puts "Local tables version: #{ret[:local_version]}"
|
||||
|
||||
time = Time.at(ret[:local_timestamp])
|
||||
puts "Timestamp: #{time.strftime("%m/%d %H:%M:%S")}"
|
||||
puts "Comment: #{ret[:local_comment]}"
|
||||
|
||||
if ret[:is_slave]
|
||||
puts
|
||||
puts "This database is a federation slave"
|
||||
end
|
||||
end
|
||||
|
||||
else
|
||||
puts version
|
||||
puts "Shared: #{ret[:version]}"
|
||||
puts "Local: #{ret[:version]}"
|
||||
end
|
||||
|
||||
return 0
|
||||
@ -114,58 +133,58 @@ class OneDB
|
||||
# max_version is ignored for now, as this is the first onedb release.
|
||||
# May be used in next releases
|
||||
def upgrade(max_version, ops)
|
||||
version, timestamp, comment = @backend.read_db_version
|
||||
db_version = @backend.read_db_version
|
||||
|
||||
if ops[:verbose]
|
||||
puts "Version read:"
|
||||
puts "#{version} : #{comment}"
|
||||
pretty_print_db_version(db_version)
|
||||
|
||||
puts ""
|
||||
end
|
||||
|
||||
matches = Dir.glob("#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb")
|
||||
|
||||
if ( matches.size > 0 )
|
||||
# At least one upgrade will be executed, make DB backup
|
||||
backup(ops[:backup], ops)
|
||||
end
|
||||
backup(ops[:backup], ops)
|
||||
|
||||
begin
|
||||
result = nil
|
||||
i = 0
|
||||
timea = Time.now
|
||||
|
||||
while ( matches.size > 0 )
|
||||
if ( matches.size > 1 )
|
||||
raise "There are more than one file that match \
|
||||
\"#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb\""
|
||||
# Upgrade shared (federation) tables, only for standalone and master
|
||||
if !db_version[:is_slave]
|
||||
puts
|
||||
puts ">>> Running migrators for shared tables"
|
||||
|
||||
dir_prefix = "#{RUBY_LIB_LOCATION}/onedb/shared"
|
||||
|
||||
result = apply_migrators(dir_prefix, db_version[:version], ops)
|
||||
|
||||
# Modify db_versioning table
|
||||
if result != nil
|
||||
@backend.update_db_version(db_version[:version])
|
||||
else
|
||||
puts "Database already uses version #{db_version[:version]}"
|
||||
end
|
||||
|
||||
file = matches[0]
|
||||
|
||||
puts " > Running migrator #{file}" if ops[:verbose]
|
||||
|
||||
load(file)
|
||||
@backend.extend Migrator
|
||||
result = @backend.up
|
||||
|
||||
if !result
|
||||
raise "Error while upgrading from #{version} to " <<
|
||||
" #{@backend.db_version}"
|
||||
end
|
||||
|
||||
puts " > Done" if ops[:verbose]
|
||||
puts "" if ops[:verbose]
|
||||
|
||||
matches = Dir.glob(
|
||||
"#{RUBY_LIB_LOCATION}/onedb/#{@backend.db_version}_to_*.rb")
|
||||
end
|
||||
|
||||
db_version = @backend.read_db_version
|
||||
|
||||
# Upgrade local tables, for standalone, master, and slave
|
||||
|
||||
puts
|
||||
puts ">>> Running migrators for local tables"
|
||||
|
||||
dir_prefix = "#{RUBY_LIB_LOCATION}/onedb/local"
|
||||
|
||||
result = apply_migrators(dir_prefix, db_version[:local_version], ops)
|
||||
|
||||
# Modify db_versioning table
|
||||
if result != nil
|
||||
@backend.update_db_version(version)
|
||||
@backend.update_local_db_version(db_version[:local_version])
|
||||
else
|
||||
puts "Database already uses version #{version}"
|
||||
puts "Database already uses version #{db_version[:local_version]}"
|
||||
end
|
||||
|
||||
timeb = Time.now
|
||||
|
||||
puts "Total time: #{"%0.02f" % (timeb - timea).to_s}s" if ops[:verbose]
|
||||
|
||||
return 0
|
||||
|
||||
rescue Exception => e
|
||||
@ -182,12 +201,50 @@ class OneDB
|
||||
end
|
||||
end
|
||||
|
||||
def apply_migrators(prefix, db_version, ops)
|
||||
result = nil
|
||||
i = 0
|
||||
|
||||
matches = Dir.glob("#{prefix}/#{db_version}_to_*.rb")
|
||||
|
||||
while ( matches.size > 0 )
|
||||
if ( matches.size > 1 )
|
||||
raise "There are more than one file that match \
|
||||
\"#{prefix}/#{db_version}_to_*.rb\""
|
||||
end
|
||||
|
||||
file = matches[0]
|
||||
|
||||
puts " > Running migrator #{file}" if ops[:verbose]
|
||||
|
||||
time0 = Time.now
|
||||
|
||||
load(file)
|
||||
@backend.extend Migrator
|
||||
result = @backend.up
|
||||
|
||||
time1 = Time.now
|
||||
|
||||
if !result
|
||||
raise "Error while upgrading from #{db_version} to " <<
|
||||
" #{@backend.db_version}"
|
||||
end
|
||||
|
||||
puts " > Done in #{"%0.02f" % (time1 - time0).to_s}s" if ops[:verbose]
|
||||
puts "" if ops[:verbose]
|
||||
|
||||
matches = Dir.glob(
|
||||
"#{prefix}/#{@backend.db_version}_to_*.rb")
|
||||
end
|
||||
|
||||
return result
|
||||
end
|
||||
|
||||
def fsck(ops)
|
||||
version, timestamp, comment = @backend.read_db_version
|
||||
ret = @backend.read_db_version
|
||||
|
||||
if ops[:verbose]
|
||||
puts "Version read:"
|
||||
puts "#{version} : #{comment}"
|
||||
pretty_print_db_version(ret)
|
||||
puts ""
|
||||
end
|
||||
|
||||
@ -200,10 +257,7 @@ class OneDB
|
||||
load(file)
|
||||
@backend.extend OneDBFsck
|
||||
|
||||
if ( version != @backend.db_version )
|
||||
raise "Version mismatch: fsck file is for version "<<
|
||||
"#{@backend.db_version}, current database version is #{version}"
|
||||
end
|
||||
@backend.check_db_version()
|
||||
|
||||
# FSCK will be executed, make DB backup
|
||||
backup(ops[:backup], ops)
|
||||
@ -211,20 +265,28 @@ class OneDB
|
||||
begin
|
||||
puts " > Running fsck" if ops[:verbose]
|
||||
|
||||
time0 = Time.now
|
||||
|
||||
result = @backend.fsck
|
||||
|
||||
if !result
|
||||
raise "Error running fsck version #{version}"
|
||||
raise "Error running fsck version #{ret[:version]}"
|
||||
end
|
||||
|
||||
puts " > Done" if ops[:verbose]
|
||||
puts "" if ops[:verbose]
|
||||
|
||||
time1 = Time.now
|
||||
|
||||
if LOG_TIME
|
||||
puts " > Total time: #{time1 - time0}s" if ops[:verbose]
|
||||
end
|
||||
|
||||
return 0
|
||||
rescue Exception => e
|
||||
puts e.message
|
||||
|
||||
puts "Error running fsck version #{version}"
|
||||
puts "Error running fsck version #{ret[:version]}"
|
||||
puts "The database will be restored"
|
||||
|
||||
ops[:force] = true
|
||||
@ -256,17 +318,17 @@ class OneDB
|
||||
:db_name => ops[:slave_db_name]
|
||||
)
|
||||
|
||||
version, timestamp, comment = @backend.read_db_version
|
||||
db_version = @backend.read_db_version
|
||||
|
||||
slave_version, slave_timestamp, slave_comment =
|
||||
slave_backend.read_db_version
|
||||
slave_db_version = slave_backend.read_db_version
|
||||
|
||||
if ops[:verbose]
|
||||
puts "Master version read:"
|
||||
puts "#{version} : #{comment}"
|
||||
puts "Master database information:"
|
||||
pretty_print_db_version(db_version)
|
||||
puts ""
|
||||
puts "Slave version read:"
|
||||
puts "#{slave_version} : #{slave_comment}"
|
||||
puts ""
|
||||
puts "Slave database information:"
|
||||
pretty_print_db_version(slave_db_version)
|
||||
puts ""
|
||||
end
|
||||
|
||||
@ -279,19 +341,7 @@ class OneDB
|
||||
load(file)
|
||||
@backend.extend OneDBImportSlave
|
||||
|
||||
if ( version != @backend.db_version )
|
||||
raise "Version mismatch: import slave file is for version "<<
|
||||
"#{@backend.db_version}, current master database version is #{version}"
|
||||
end
|
||||
|
||||
if ( slave_version != @backend.db_version )
|
||||
raise "Version mismatch: import slave file is for version "<<
|
||||
"#{@backend.db_version}, current slave database version is #{version}"
|
||||
end
|
||||
|
||||
# Import will be executed, make DB backup
|
||||
backup(ops[:backup], ops)
|
||||
backup(ops[:"slave-backup"], ops, slave_backend)
|
||||
@backend.check_db_version(db_version, slave_db_version)
|
||||
|
||||
puts <<-EOT
|
||||
Before running this tool, it is required to create a new Zone in the
|
||||
@ -343,6 +393,10 @@ is preserved.
|
||||
|
||||
merge_groups = input == "Y"
|
||||
|
||||
# Import will be executed, make DB backup
|
||||
backup(ops[:backup], ops)
|
||||
backup(ops[:"slave-backup"], ops, slave_backend)
|
||||
|
||||
begin
|
||||
puts " > Running slave import" if ops[:verbose]
|
||||
|
||||
@ -350,7 +404,7 @@ is preserved.
|
||||
merge_groups, zone_id)
|
||||
|
||||
if !result
|
||||
raise "Error running slave import version #{version}"
|
||||
raise "Error running slave import"
|
||||
end
|
||||
|
||||
puts " > Done" if ops[:verbose]
|
||||
@ -360,7 +414,7 @@ is preserved.
|
||||
rescue Exception => e
|
||||
puts e.message
|
||||
|
||||
puts "Error running slave import version #{version}"
|
||||
puts "Error running slave import"
|
||||
puts "The databases will be restored"
|
||||
|
||||
ops[:force] = true
|
||||
@ -383,4 +437,18 @@ is preserved.
|
||||
raise "First stop OpenNebula. Lock file found: #{LOCK_FILE}"
|
||||
end
|
||||
end
|
||||
|
||||
def pretty_print_db_version(db_version)
|
||||
puts "Version read:"
|
||||
puts "Shared tables #{db_version[:version]} : #{db_version[:comment]}"
|
||||
|
||||
if db_version[:local_version]
|
||||
puts "Local tables #{db_version[:local_version]} : #{db_version[:local_comment]}"
|
||||
end
|
||||
|
||||
if db_version[:is_slave]
|
||||
puts
|
||||
puts "This database is a federation slave"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
@ -28,19 +28,41 @@ class OneDBBacKEnd
|
||||
def read_db_version
|
||||
connect_db
|
||||
|
||||
ret = {}
|
||||
|
||||
begin
|
||||
version = "2.0"
|
||||
timestamp = 0
|
||||
comment = ""
|
||||
ret[:version] = "2.0"
|
||||
ret[:timestamp] = 0
|
||||
ret[:comment] = ""
|
||||
|
||||
@db.fetch("SELECT version, timestamp, comment FROM db_versioning " +
|
||||
"WHERE oid=(SELECT MAX(oid) FROM db_versioning)") do |row|
|
||||
version = row[:version]
|
||||
timestamp = row[:timestamp]
|
||||
comment = row[:comment]
|
||||
ret[:version] = row[:version]
|
||||
ret[:timestamp] = row[:timestamp]
|
||||
ret[:comment] = row[:comment]
|
||||
end
|
||||
|
||||
return [version, timestamp, comment]
|
||||
ret[:local_version] = ret[:version]
|
||||
ret[:local_timestamp] = ret[:timestamp]
|
||||
ret[:local_comment] = ret[:comment]
|
||||
ret[:is_slave] = false
|
||||
|
||||
begin
|
||||
@db.fetch("SELECT version, timestamp, comment, is_slave FROM "+
|
||||
"local_db_versioning WHERE oid=(SELECT MAX(oid) "+
|
||||
"FROM local_db_versioning)") do |row|
|
||||
ret[:local_version] = row[:version]
|
||||
ret[:local_timestamp] = row[:timestamp]
|
||||
ret[:local_comment] = row[:comment]
|
||||
ret[:is_slave] = row[:is_slave]
|
||||
end
|
||||
rescue Exception => e
|
||||
if e.class == Sequel::DatabaseConnectionError
|
||||
raise e
|
||||
end
|
||||
end
|
||||
|
||||
return ret
|
||||
|
||||
rescue Exception => e
|
||||
if e.class == Sequel::DatabaseConnectionError
|
||||
@ -62,7 +84,7 @@ class OneDBBacKEnd
|
||||
comment = "Could not read any previous db_versioning data, " <<
|
||||
"assuming it is an OpenNebula 2.0 or 2.2 DB."
|
||||
|
||||
return [version, timestamp, comment]
|
||||
return ret
|
||||
end
|
||||
end
|
||||
|
||||
@ -110,6 +132,37 @@ class OneDBBacKEnd
|
||||
puts comment
|
||||
end
|
||||
|
||||
def update_local_db_version(version)
|
||||
comment = "Database migrated from #{version} to #{db_version}"+
|
||||
" (#{one_version}) by onedb command."
|
||||
|
||||
max_oid = nil
|
||||
@db.fetch("SELECT MAX(oid) FROM local_db_versioning") do |row|
|
||||
max_oid = row[:"MAX(oid)"].to_i
|
||||
end
|
||||
|
||||
max_oid = 0 if max_oid.nil?
|
||||
|
||||
is_slave = 0
|
||||
|
||||
@db.fetch("SELECT is_slave FROM local_db_versioning "<<
|
||||
"WHERE oid=#{max_oid}") do |row|
|
||||
is_slave = row[:is_slave]
|
||||
end
|
||||
|
||||
@db.run(
|
||||
"INSERT INTO local_db_versioning (oid, version, timestamp, comment, is_slave) "<<
|
||||
"VALUES (" <<
|
||||
"#{max_oid+1}, " <<
|
||||
"'#{db_version}', " <<
|
||||
"#{Time.new.to_i}, " <<
|
||||
"'#{comment}'," <<
|
||||
"#{is_slave})"
|
||||
)
|
||||
|
||||
puts comment
|
||||
end
|
||||
|
||||
def db()
|
||||
return @db
|
||||
end
|
||||
@ -129,6 +182,20 @@ class OneDBBacKEnd
|
||||
|
||||
return found
|
||||
end
|
||||
|
||||
def init_log_time()
|
||||
@block_n = 0
|
||||
@time0 = Time.now
|
||||
end
|
||||
|
||||
def log_time()
|
||||
if LOG_TIME
|
||||
@time1 = Time.now
|
||||
puts " > #{db_version} Time for block #{@block_n}: #{"%0.02f" % (@time1 - @time0).to_s}s"
|
||||
@time0 = Time.now
|
||||
@block_n += 1
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
class BackEndMySQL < OneDBBacKEnd
|
||||
@ -166,7 +233,7 @@ class BackEndMySQL < OneDBBacKEnd
|
||||
end
|
||||
|
||||
def backup(bck_file)
|
||||
cmd = "mysqldump -u #{@user} -p#{@passwd} -h #{@server} " +
|
||||
cmd = "mysqldump -u #{@user} -p'#{@passwd}' -h #{@server} " +
|
||||
"-P #{@port} #{@db_name} > #{bck_file}"
|
||||
|
||||
rc = system(cmd)
|
||||
@ -188,7 +255,7 @@ class BackEndMySQL < OneDBBacKEnd
|
||||
" use -f to overwrite."
|
||||
end
|
||||
|
||||
mysql_cmd = "mysql -u #{@user} -p#{@passwd} -h #{@server} -P #{@port} "
|
||||
mysql_cmd = "mysql -u #{@user} -p'#{@passwd}' -h #{@server} -P #{@port} "
|
||||
|
||||
drop_cmd = mysql_cmd + "-e 'DROP DATABASE IF EXISTS #{@db_name};'"
|
||||
rc = system(drop_cmd)
|
||||
|
@ -15,6 +15,7 @@
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'set'
|
||||
require 'nokogiri'
|
||||
require "rexml/document"
|
||||
include REXML
|
||||
|
||||
@ -29,6 +30,8 @@ module Migrator
|
||||
|
||||
def up
|
||||
|
||||
init_log_time()
|
||||
|
||||
########################################################################
|
||||
# Bug : Add VM IDs Collection to Hosts & Images
|
||||
########################################################################
|
||||
@ -49,17 +52,17 @@ module Migrator
|
||||
counters[:image][row[:oid]] = Set.new
|
||||
end
|
||||
|
||||
log_time()
|
||||
|
||||
# Aggregate information of the RUNNING vms
|
||||
@db.fetch("SELECT oid,body FROM vm_pool WHERE state<>6") do |row|
|
||||
vm_doc = Document.new(row[:body])
|
||||
|
||||
state = vm_doc.root.get_text('STATE').to_s.to_i
|
||||
lcm_state = vm_doc.root.get_text('LCM_STATE').to_s.to_i
|
||||
vm_doc = Nokogiri::XML(row[:body])
|
||||
|
||||
state = vm_doc.root.at_xpath('STATE').text.to_i
|
||||
lcm_state = vm_doc.root.at_xpath('LCM_STATE').text.to_i
|
||||
|
||||
# Images used by this VM
|
||||
vm_doc.root.each_element("TEMPLATE/DISK/IMAGE_ID") do |e|
|
||||
vm_doc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each do |e|
|
||||
img_id = e.text.to_i
|
||||
|
||||
if counters[:image][img_id].nil?
|
||||
@ -78,7 +81,7 @@ module Migrator
|
||||
|
||||
# Get hostid
|
||||
hid = -1
|
||||
vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HID") { |e|
|
||||
vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HID").each { |e|
|
||||
hid = e.text.to_i
|
||||
}
|
||||
|
||||
@ -89,6 +92,8 @@ module Migrator
|
||||
end
|
||||
end
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Hosts
|
||||
#
|
||||
@ -103,38 +108,41 @@ module Migrator
|
||||
"UNIQUE(name));"
|
||||
|
||||
# Calculate the host's xml and write them to host_pool_new
|
||||
@db[:host_pool].each do |row|
|
||||
host_doc = Document.new(row[:body])
|
||||
@db.transaction do
|
||||
@db[:host_pool].each do |row|
|
||||
host_doc = Document.new(row[:body])
|
||||
|
||||
hid = row[:oid]
|
||||
hid = row[:oid]
|
||||
|
||||
rvms = counters[:host][hid][:rvms].size
|
||||
rvms = counters[:host][hid][:rvms].size
|
||||
|
||||
# rewrite running_vms
|
||||
host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e|
|
||||
if e.text != rvms.to_s
|
||||
warn("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}")
|
||||
e.text = rvms
|
||||
# rewrite running_vms
|
||||
host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e|
|
||||
if e.text != rvms.to_s
|
||||
warn("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}")
|
||||
e.text = rvms
|
||||
end
|
||||
}
|
||||
|
||||
# re-do list of VM IDs
|
||||
vms_new_elem = host_doc.root.add_element("VMS")
|
||||
|
||||
counters[:host][hid][:rvms].each do |id|
|
||||
vms_new_elem.add_element("ID").text = id.to_s
|
||||
end
|
||||
}
|
||||
|
||||
# re-do list of VM IDs
|
||||
vms_new_elem = host_doc.root.add_element("VMS")
|
||||
row[:body] = host_doc.to_s
|
||||
|
||||
counters[:host][hid][:rvms].each do |id|
|
||||
vms_new_elem.add_element("ID").text = id.to_s
|
||||
# commit
|
||||
@db[:host_pool_new].insert(row)
|
||||
end
|
||||
|
||||
row[:body] = host_doc.to_s
|
||||
|
||||
# commit
|
||||
@db[:host_pool_new].insert(row)
|
||||
end
|
||||
|
||||
# Rename table
|
||||
@db.run("DROP TABLE host_pool")
|
||||
@db.run("ALTER TABLE host_pool_new RENAME TO host_pool")
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Image
|
||||
@ -146,39 +154,42 @@ module Migrator
|
||||
# Create a new empty table where we will store the new calculated values
|
||||
@db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );"
|
||||
|
||||
# Calculate the host's xml and write them to host_pool_new
|
||||
@db[:image_pool].each do |row|
|
||||
doc = Document.new(row[:body])
|
||||
@db.transaction do
|
||||
@db[:image_pool].each do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
oid = row[:oid]
|
||||
oid = row[:oid]
|
||||
|
||||
rvms = counters[:image][oid].size
|
||||
rvms = counters[:image][oid].size
|
||||
|
||||
# rewrite running_vms
|
||||
doc.root.each_element("RUNNING_VMS") {|e|
|
||||
if e.text != rvms.to_s
|
||||
warn("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}")
|
||||
e.text = rvms
|
||||
# rewrite running_vms
|
||||
doc.root.each_element("RUNNING_VMS") {|e|
|
||||
if e.text != rvms.to_s
|
||||
warn("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}")
|
||||
e.text = rvms
|
||||
end
|
||||
}
|
||||
|
||||
# re-do list of VM IDs
|
||||
vms_new_elem = doc.root.add_element("VMS")
|
||||
|
||||
counters[:image][oid].each do |id|
|
||||
vms_new_elem.add_element("ID").text = id.to_s
|
||||
end
|
||||
}
|
||||
|
||||
# re-do list of VM IDs
|
||||
vms_new_elem = doc.root.add_element("VMS")
|
||||
row[:body] = doc.to_s
|
||||
|
||||
counters[:image][oid].each do |id|
|
||||
vms_new_elem.add_element("ID").text = id.to_s
|
||||
# commit
|
||||
@db[:image_pool_new].insert(row)
|
||||
end
|
||||
|
||||
row[:body] = doc.to_s
|
||||
|
||||
# commit
|
||||
@db[:image_pool_new].insert(row)
|
||||
end
|
||||
|
||||
# Rename table
|
||||
@db.run("DROP TABLE image_pool")
|
||||
@db.run("ALTER TABLE image_pool_new RENAME TO image_pool")
|
||||
|
||||
log_time()
|
||||
|
||||
return true
|
||||
end
|
||||
end
|
src/onedb/shared/3.8.5_to_3.9.80.rb: new file, 654 lines
@ -0,0 +1,654 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'set'
|
||||
require "rexml/document"
|
||||
include REXML
|
||||
|
||||
require 'nokogiri'
|
||||
|
||||
class String
|
||||
def red
|
||||
colorize(31)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def colorize(color_code)
|
||||
"\e[#{color_code}m#{self}\e[0m"
|
||||
end
|
||||
end
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"3.9.80"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 3.9.80"
|
||||
end
|
||||
|
||||
def up
|
||||
|
||||
init_log_time()
|
||||
|
||||
########################################################################
|
||||
# Add Cloning Image ID collection to Images
|
||||
########################################################################
|
||||
|
||||
counters = {}
|
||||
counters[:image] = {}
|
||||
|
||||
# Init image counters
|
||||
@db.fetch("SELECT oid,body FROM image_pool") do |row|
|
||||
if counters[:image][row[:oid]].nil?
|
||||
counters[:image][row[:oid]] = {
|
||||
:clones => Set.new
|
||||
}
|
||||
end
|
||||
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("CLONING_ID") do |e|
|
||||
img_id = e.text.to_i
|
||||
|
||||
if counters[:image][img_id].nil?
|
||||
counters[:image][img_id] = {
|
||||
:clones => Set.new
|
||||
}
|
||||
end
|
||||
|
||||
counters[:image][img_id][:clones].add(row[:oid])
|
||||
end
|
||||
end
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Image
|
||||
#
|
||||
# IMAGE/CLONING_OPS
|
||||
# IMAGE/CLONES/ID
|
||||
########################################################################
|
||||
|
||||
@db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );"
|
||||
|
||||
@db.transaction do
|
||||
@db[:image_pool].each do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
oid = row[:oid]
|
||||
|
||||
n_cloning_ops = counters[:image][oid][:clones].size
|
||||
|
||||
# Rewrite number of clones
|
||||
doc.root.each_element("CLONING_OPS") { |e|
|
||||
if e.text != n_cloning_ops.to_s
|
||||
warn("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}")
|
||||
e.text = n_cloning_ops
|
||||
end
|
||||
}
|
||||
|
||||
# re-do list of Images cloning this one
|
||||
clones_new_elem = doc.root.add_element("CLONES")
|
||||
|
||||
counters[:image][oid][:clones].each do |id|
|
||||
clones_new_elem.add_element("ID").text = id.to_s
|
||||
end
|
||||
|
||||
row[:body] = doc.to_s
|
||||
|
||||
# commit
|
||||
@db[:image_pool_new].insert(row)
|
||||
end
|
||||
end
|
||||
|
||||
# Rename table
|
||||
@db.run("DROP TABLE image_pool")
|
||||
@db.run("ALTER TABLE image_pool_new RENAME TO image_pool")
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1565: New cid column in host, ds and vnet tables
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
|
||||
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_host_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
cluster_id = doc.root.get_text('CLUSTER_ID').to_s
|
||||
|
||||
@db[:host_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:state => row[:state],
|
||||
:last_mon_time => row[:last_mon_time],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => cluster_id)
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_host_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1565: New cid column
|
||||
# Feature #471: IPv6 addresses
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE network_pool RENAME TO old_network_pool;"
|
||||
@db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_network_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
cluster_id = doc.root.get_text('CLUSTER_ID').to_s
|
||||
|
||||
doc.root.add_element("GLOBAL_PREFIX")
|
||||
doc.root.add_element("SITE_PREFIX")
|
||||
|
||||
@db[:network_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => cluster_id)
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_network_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1617
|
||||
# New datastore, 2 "files"
|
||||
# DATASTORE/SYSTEM is now DATASTORE/TYPE
|
||||
#
|
||||
# Feature #1565: New cid column in host, ds and vnet tables
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;"
|
||||
@db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_datastore_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
type = "0" # IMAGE_DS
|
||||
|
||||
system_elem = doc.root.delete_element("SYSTEM")
|
||||
|
||||
if ( !system_elem.nil? && system_elem.text == "1" )
|
||||
type = "1" # SYSTEM_DS
|
||||
end
|
||||
|
||||
doc.root.add_element("TYPE").text = type
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.delete_element("SYSTEM")
|
||||
e.add_element("TYPE").text = type == "0" ? "IMAGE_DS" : "SYSTEM_DS"
|
||||
end
|
||||
|
||||
cluster_id = doc.root.get_text('CLUSTER_ID').to_s
|
||||
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => cluster_id)
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_datastore_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
user_0_name = "oneadmin"
|
||||
|
||||
@db.fetch("SELECT name FROM user_pool WHERE oid=0") do |row|
|
||||
user_0_name = row[:name]
|
||||
end
|
||||
|
||||
group_0_name = "oneadmin"
|
||||
|
||||
@db.fetch("SELECT name FROM group_pool WHERE oid=0") do |row|
|
||||
group_0_name = row[:name]
|
||||
end
|
||||
|
||||
base_path = "/var/lib/one/datastores/2"
|
||||
|
||||
@db.fetch("SELECT body FROM datastore_pool WHERE oid=0") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("BASE_PATH") do |e|
|
||||
base_path = e.text
|
||||
base_path[-1] = "2"
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "INSERT INTO datastore_pool VALUES(2,'files','<DATASTORE><ID>2</ID><UID>0</UID><GID>0</GID><UNAME>#{user_0_name}</UNAME><GNAME>#{group_0_name}</GNAME><NAME>files</NAME><PERMISSIONS><OWNER_U>1</OWNER_U><OWNER_M>1</OWNER_M><OWNER_A>0</OWNER_A><GROUP_U>1</GROUP_U><GROUP_M>0</GROUP_M><GROUP_A>0</GROUP_A><OTHER_U>1</OTHER_U><OTHER_M>0</OTHER_M><OTHER_A>0</OTHER_A></PERMISSIONS><DS_MAD>fs</DS_MAD><TM_MAD>ssh</TM_MAD><BASE_PATH>#{base_path}</BASE_PATH><TYPE>2</TYPE><DISK_TYPE>0</DISK_TYPE><CLUSTER_ID>-1</CLUSTER_ID><CLUSTER></CLUSTER><IMAGES></IMAGES><TEMPLATE><DS_MAD><![CDATA[fs]]></DS_MAD><TM_MAD><![CDATA[ssh]]></TM_MAD><TYPE><![CDATA[FILE_DS]]></TYPE></TEMPLATE></DATASTORE>',0,0,1,1,1,-1);"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1611: Default quotas
|
||||
########################################################################
|
||||
|
||||
@db.run("CREATE TABLE IF NOT EXISTS system_attributes (name VARCHAR(128) PRIMARY KEY, body MEDIUMTEXT)")
|
||||
@db.run("INSERT INTO system_attributes VALUES('DEFAULT_GROUP_QUOTAS','<DEFAULT_GROUP_QUOTAS><DATASTORE_QUOTA></DATASTORE_QUOTA><NETWORK_QUOTA></NETWORK_QUOTA><VM_QUOTA></VM_QUOTA><IMAGE_QUOTA></IMAGE_QUOTA></DEFAULT_GROUP_QUOTAS>');")
|
||||
@db.run("INSERT INTO system_attributes VALUES('DEFAULT_USER_QUOTAS','<DEFAULT_USER_QUOTAS><DATASTORE_QUOTA></DATASTORE_QUOTA><NETWORK_QUOTA></NETWORK_QUOTA><VM_QUOTA></VM_QUOTA><IMAGE_QUOTA></IMAGE_QUOTA></DEFAULT_USER_QUOTAS>');")
|
||||
|
||||
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
# oneadmin does not have quotas
|
||||
@db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row|
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
set_default_quotas(doc)
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_user_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
@db.run "ALTER TABLE group_pool RENAME TO old_group_pool;"
|
||||
@db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
# oneadmin group does not have quotas
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row|
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
set_default_quotas(doc)
|
||||
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_group_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Bug #1694: SYSTEM_DS is now set with the method adddatastore
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;"
|
||||
@db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_cluster_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
|
||||
system_ds = 0
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
elem = e.delete_element("SYSTEM_DS")
|
||||
|
||||
if !elem.nil?
|
||||
system_ds = elem.text.to_i
|
||||
end
|
||||
end
|
||||
|
||||
if system_ds != 0
|
||||
updated_body = nil
|
||||
|
||||
@db.fetch("SELECT body FROM datastore_pool WHERE oid=#{system_ds}") do |ds_row|
|
||||
ds_doc = Document.new(ds_row[:body])
|
||||
|
||||
type = "0" # IMAGE_DS
|
||||
|
||||
ds_doc.root.each_element("TYPE") do |e|
|
||||
type = e.text
|
||||
end
|
||||
|
||||
if type != "1"
|
||||
puts " > Cluster #{row[:oid]} has the "<<
|
||||
"System Datastore set to Datastore #{system_ds}, "<<
|
||||
"but its type is not SYSTEM_DS. The System Datastore "<<
|
||||
"for this Cluster will be set to 0"
|
||||
|
||||
system_ds = 0
|
||||
else
|
||||
cluster_id = "-1"
|
||||
|
||||
ds_doc.root.each_element("CLUSTER_ID") do |e|
|
||||
cluster_id = e.text
|
||||
end
|
||||
|
||||
if row[:oid] != cluster_id.to_i
|
||||
puts " > Cluster #{row[:oid]} has the "<<
|
||||
"System Datastore set to Datastore #{system_ds}, "<<
|
||||
"but it is not part of the Cluster. It will be added now."
|
||||
|
||||
ds_doc.root.each_element("CLUSTER_ID") do |e|
|
||||
e.text = row[:oid]
|
||||
end
|
||||
|
||||
ds_doc.root.each_element("CLUSTER") do |e|
|
||||
e.text = row[:name]
|
||||
end
|
||||
|
||||
updated_body = ds_doc.root.to_s
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if !updated_body.nil?
|
||||
@db[:datastore_pool].where(:oid => system_ds).update(
|
||||
:body => updated_body)
|
||||
end
|
||||
end
|
||||
|
||||
doc.root.add_element("SYSTEM_DS").text = system_ds.to_s
|
||||
|
||||
@db[:cluster_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_cluster_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1556: New elem USER_TEMPLATE
|
||||
#
|
||||
# Feature #1483: Move scheduling attributes
|
||||
# /VM/TEMPLATE/REQUIREMENTS -> USER_TEMPLATE/SCHED_REQUIREMENTS
|
||||
# /VM/TEMPLATE/RANK -> USER_TEMPLATE/SCHED_RANK
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
user_template = doc.create_element("USER_TEMPLATE")
|
||||
|
||||
e = doc.root.at_xpath("TEMPLATE")
|
||||
elem = e.at_xpath("REQUIREMENTS")
|
||||
|
||||
if !elem.nil?
|
||||
new_elem = doc.create_element("SCHED_REQUIREMENTS")
|
||||
new_elem.content = elem.text
|
||||
elem.remove
|
||||
|
||||
user_template.add_child(new_elem)
|
||||
end
|
||||
|
||||
elem = e.at_xpath("RANK")
|
||||
|
||||
if !elem.nil?
|
||||
new_elem = doc.create_element("SCHED_RANK")
|
||||
new_elem.content = elem.text
|
||||
elem.remove
|
||||
|
||||
user_template.add_child(new_elem)
|
||||
end
|
||||
|
||||
doc.root << user_template
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1483: Move scheduling attributes
|
||||
# /VMTEMPLATE/TEMPLATE/REQUIREMENTS -> /VMTEMPLATE/TEMPLATE/SCHED_REQUIREMENTS
|
||||
# /VMTEMPLATE/TEMPLATE/RANK -> /VMTEMPLATE/TEMPLATE/SCHED_RANK
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE template_pool RENAME TO old_template_pool;"
|
||||
@db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_template_pool") do |row|
|
||||
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
template = doc.root.at_xpath("TEMPLATE")
|
||||
|
||||
elem = template.at_xpath("REQUIREMENTS")
|
||||
|
||||
if !elem.nil?
|
||||
new_elem = doc.create_element("SCHED_REQUIREMENTS")
|
||||
new_elem.content = elem.text
|
||||
elem.remove
|
||||
|
||||
template.add_child(new_elem)
|
||||
end
|
||||
|
||||
elem = template.at_xpath("RANK")
|
||||
|
||||
if !elem.nil?
|
||||
new_elem = doc.create_element("SCHED_RANK")
|
||||
new_elem.content = elem.text
|
||||
elem.remove
|
||||
|
||||
template.add_child(new_elem)
|
||||
end
|
||||
|
||||
@db[:template_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_template_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1691 Add new attribute NIC/NIC_ID
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
if ( row[:state] != 6 ) # DONE
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
nic_id = 0
|
||||
|
||||
doc.root.xpath("TEMPLATE/NIC").each { |e|
|
||||
e.xpath("NIC_ID").each {|n| n.remove}
|
||||
e.add_child(doc.create_element("NIC_ID")).content =
|
||||
(nic_id).to_s
|
||||
|
||||
nic_id += 1
|
||||
}
|
||||
|
||||
row[:body] = doc.root.to_s
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
#
|
||||
# Banner for the new /var/lib/one/vms directory
|
||||
#
|
||||
########################################################################
|
||||
|
||||
puts
|
||||
puts "ATTENTION: manual intervention required".red
|
||||
puts <<-END.gsub(/^ {8}/, '')
|
||||
Virtual Machine deployment files have been moved from /var/lib/one to
|
||||
/var/lib/one/vms. You need to move these files manually:
|
||||
|
||||
$ mv /var/lib/one/[0-9]* /var/lib/one/vms
|
||||
|
||||
END
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
|
||||
def set_default_quotas(doc)
|
||||
|
||||
# VM quotas
|
||||
|
||||
doc.root.xpath("VM_QUOTA/VM/CPU").each do |e|
|
||||
e.content = "-1" if e.text.to_f == 0
|
||||
end
|
||||
|
||||
doc.root.xpath("VM_QUOTA/VM/MEMORY").each do |e|
|
||||
e.content = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
doc.root.xpath("VM_QUOTA/VM/VMS").each do |e|
|
||||
e.content = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
# VNet quotas
|
||||
|
||||
doc.root.xpath("NETWORK_QUOTA/NETWORK/LEASES").each do |e|
|
||||
e.content = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
# Image quotas
|
||||
|
||||
doc.root.xpath("IMAGE_QUOTA/IMAGE/RVMS").each do |e|
|
||||
e.content = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
# Datastore quotas
|
||||
|
||||
doc.root.xpath("DATASTORE_QUOTA/DATASTORE/IMAGES").each do |e|
|
||||
e.content = "-1" if e.text.to_i == 0
|
||||
end
|
||||
|
||||
doc.root.xpath("DATASTORE_QUOTA/DATASTORE/SIZE").each do |e|
|
||||
e.content = "-1" if e.text.to_i == 0
|
||||
end
|
||||
end
|
||||
end
|
@ -14,8 +14,7 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require "rexml/document"
|
||||
include REXML
|
||||
require "nokogiri"
|
||||
|
||||
class String
|
||||
def red
|
||||
@ -39,6 +38,7 @@ module Migrator
|
||||
end
|
||||
|
||||
def up
|
||||
init_log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1631: Add ACTION to history entries
|
||||
@ -47,49 +47,57 @@ module Migrator
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.each_element("HISTORY_RECORDS/HISTORY") do |e|
|
||||
update_history(e)
|
||||
doc.root.xpath("HISTORY_RECORDS/HISTORY").each do |e|
|
||||
update_history(e)
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
@db.run "ALTER TABLE history RENAME TO old_history;"
|
||||
@db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_history") do |row|
|
||||
doc = Document.new(row[:body])
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_history") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.each_element("/HISTORY") do |e|
|
||||
update_history(e)
|
||||
doc.root.xpath("/HISTORY").each do |e|
|
||||
update_history(e)
|
||||
end
|
||||
|
||||
@db[:history].insert(
|
||||
:vid => row[:vid],
|
||||
:seq => row[:seq],
|
||||
:body => doc.root.to_s,
|
||||
:stime => row[:stime],
|
||||
:etime => row[:etime])
|
||||
end
|
||||
|
||||
@db[:history].insert(
|
||||
:vid => row[:vid],
|
||||
:seq => row[:seq],
|
||||
:body => doc.root.to_s,
|
||||
:stime => row[:stime],
|
||||
:etime => row[:etime])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_history;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Banner for drivers renamed
|
||||
########################################################################
|
||||
@ -135,16 +143,18 @@ module Migrator
|
||||
end
|
||||
|
||||
def update_history(history_elem)
|
||||
history_elem.add_element("ACTION").text = "0" # NONE_ACTION
|
||||
# NONE_ACTION
|
||||
history_elem.add_child(
|
||||
history_elem.document.create_element("ACTION")).content = "0"
|
||||
|
||||
# History reason enum has changed from
|
||||
# NONE, ERROR, STOP_RESUME, USER, CANCEL to
|
||||
# NONE, ERROR, USER
|
||||
history_elem.each_element("REASON") do |reason_e|
|
||||
history_elem.xpath("REASON").each do |reason_e|
|
||||
reason = reason_e.text.to_i
|
||||
|
||||
if reason > 1 # STOP_RESUME, USER, CANCEL
|
||||
reason_e.text = "2" # USER
|
||||
if reason > 1 # STOP_RESUME, USER, CANCEL
|
||||
reason_e.content = "2" # USER
|
||||
end
|
||||
end
|
||||
end
|
@ -15,9 +15,9 @@
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'fileutils'
|
||||
require 'rexml/document'
|
||||
require 'openssl'
|
||||
|
||||
require "nokogiri"
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
@ -42,30 +42,35 @@ module Migrator
|
||||
puts "Please copy the files manually."
|
||||
end
|
||||
|
||||
init_log_time()
|
||||
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_user_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_user_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.add_element("TOKEN_PASSWORD").text =
|
||||
OpenSSL::Digest::SHA1.hexdigest( rand().to_s )
|
||||
doc.root.at_xpath("TEMPLATE")
|
||||
.add_child(doc.create_element("TOKEN_PASSWORD"))
|
||||
.content = OpenSSL::Digest::SHA1.hexdigest( rand().to_s )
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_user_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1613
|
||||
########################################################################
|
||||
@ -73,27 +78,31 @@ module Migrator
|
||||
@db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;"
|
||||
@db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.fetch("SELECT * FROM old_datastore_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_datastore_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.add_element("TOTAL_MB").text = "0"
|
||||
doc.root.add_element("FREE_MB").text = "0"
|
||||
doc.root.add_element("USED_MB").text = "0"
|
||||
doc.root.add_child(doc.create_element("TOTAL_MB")).content = "0"
|
||||
doc.root.add_child(doc.create_element("FREE_MB")).content = "0"
|
||||
doc.root.add_child(doc.create_element("USED_MB")).content = "0"
|
||||
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => row[:cid])
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => row[:cid])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_datastore_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
return true
|
||||
end
|
||||
end
|
434
src/onedb/shared/4.2.0_to_4.3.80.rb
Normal file
@ -0,0 +1,434 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'rexml/document'
|
||||
require 'nokogiri'
|
||||
|
||||
TM_MAD_CONF = {
|
||||
"dummy" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"lvm" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SELF"
|
||||
},
|
||||
"shared" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"shared_lvm" => {
|
||||
:ln_target => "SYSTEM",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"qcow2" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"ssh" => {
|
||||
:ln_target => "SYSTEM",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"vmfs" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SYSTEM"
|
||||
},
|
||||
"iscsi" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SELF"
|
||||
},
|
||||
"ceph" => {
|
||||
:ln_target => "NONE",
|
||||
:clone_target => "SELF"
|
||||
}
|
||||
}
|
||||
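# Illustrative sketch, not part of this migration: TM_MAD_CONF maps each stock
# TM driver to the LN_TARGET/CLONE_TARGET pair that the upgrade writes into
# image datastore templates further down. A nil lookup means a custom driver,
# which is the case flagged below for manual intervention. Hypothetical helper:
def targets_for(tm_mad)
  conf = TM_MAD_CONF[tm_mad]
  conf && [conf[:ln_target], conf[:clone_target]]
end
# targets_for("qcow2")  #=> ["NONE", "SYSTEM"]
# targets_for("my_tm")  #=> nil (custom driver, manual oned.conf changes needed)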
|
||||
class String
|
||||
def red
|
||||
colorize(31)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def colorize(color_code)
|
||||
"\e[#{color_code}m#{self}\e[0m"
|
||||
end
|
||||
end
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"4.3.80"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 4.3.80"
|
||||
end
|
||||
|
||||
def up
|
||||
|
||||
init_log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1742 & #1612
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_user_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
g_elem = doc.create_element("GROUPS")
|
||||
g_elem.add_child(doc.create_element("ID")).content = row[:gid].to_s
|
||||
|
||||
doc.root.add_child(g_elem)
|
||||
|
||||
# oneadmin does not have quotas
|
||||
if row[:oid] != 0
|
||||
redo_vm_quotas(doc, "uid=#{row[:oid]}")
|
||||
end
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_user_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1612
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE group_pool RENAME TO old_group_pool;"
|
||||
@db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
# oneadmin group does not have quotas
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row|
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => row[:body],
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
|
||||
@db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
redo_vm_quotas(doc, "gid=#{row[:oid]}")
|
||||
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_group_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Bug #2330 & Feature #1678
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;"
|
||||
@db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
#tm_mads = {}
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_datastore_pool") do |row|
|
||||
doc = REXML::Document.new(row[:body])
|
||||
|
||||
doc.root.each_element("TEMPLATE/HOST") do |e|
|
||||
e.name = "BRIDGE_LIST"
|
||||
end
|
||||
|
||||
tm_mad = ""
|
||||
doc.root.each_element("TM_MAD"){ |e| tm_mad = e.text }
|
||||
|
||||
type = 0
|
||||
doc.root.each_element("TYPE"){ |e| type = e.text.to_i }
|
||||
|
||||
if (type == 1) # System DS
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.add_element("SHARED").text =
|
||||
(tm_mad == "ssh" ? "NO" : "YES")
|
||||
end
|
||||
else
|
||||
#tm_mads[row[:oid].to_i] = tm_mad
|
||||
|
||||
conf = TM_MAD_CONF[tm_mad]
|
||||
|
||||
if conf.nil?
|
||||
puts
|
||||
puts "ATTENTION: manual intervention required".red
|
||||
puts <<-END
|
||||
The Datastore ##{row[:oid]} #{row[:name]} is using the
|
||||
custom TM MAD '#{tm_mad}'. You will need to define new
|
||||
configuration parameters in oned.conf for this driver, see
|
||||
http://opennebula.org/documentation:rel4.4:upgrade
|
||||
END
|
||||
else
|
||||
doc.root.each_element("TEMPLATE") do |e|
|
||||
e.add_element("LN_TARGET").text = conf[:ln_target]
|
||||
e.add_element("CLONE_TARGET").text = conf[:clone_target]
|
||||
end
|
||||
end
|
||||
end
|
||||
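# Illustrative sketch, not part of the migration: the SHARED flag written
# above for system datastores depends only on the TM driver, ssh being the
# sole non-shared case. A hypothetical lambda mirroring that rule:
system_ds_shared = ->(tm_mad) { tm_mad == "ssh" ? "NO" : "YES" }
# system_ds_shared.call("ssh")     #=> "NO"
# system_ds_shared.call("shared")  #=> "YES"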
|
||||
@db[:datastore_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => row[:cid])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_datastore_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #2392
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;"
|
||||
@db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_vm_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.xpath("HISTORY_RECORDS/HISTORY").each do |e|
|
||||
update_history(e)
|
||||
end
|
||||
|
||||
@db[:vm_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:last_poll => row[:last_poll],
|
||||
:state => row[:state],
|
||||
:lcm_state => row[:lcm_state],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_vm_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
@db.run "ALTER TABLE history RENAME TO old_history;"
|
||||
@db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_history") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.xpath("/HISTORY").each do |e|
|
||||
update_history(e)
|
||||
end
|
||||
|
||||
@db[:history].insert(
|
||||
:vid => row[:vid],
|
||||
:seq => row[:seq],
|
||||
:body => doc.root.to_s,
|
||||
:stime => row[:stime],
|
||||
:etime => row[:etime])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_history;"
|
||||
|
||||
log_time()
|
||||
|
||||
########################################################################
|
||||
# Feature #1678
|
||||
########################################################################
|
||||
|
||||
@db.run "ALTER TABLE host_pool RENAME TO old_host_pool;"
|
||||
@db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_host_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
doc.root.at_xpath("HOST_SHARE")
|
||||
.add_child(doc.create_element("DATASTORES"))
|
||||
|
||||
@db[:host_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:state => row[:state],
|
||||
:last_mon_time => row[:last_mon_time],
|
||||
:uid => row[:uid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u],
|
||||
:cid => row[:cid])
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_host_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
# TODO:
|
||||
# For Feature #1678, VMs have new disk elements:
|
||||
# VM/DISK/CLONE_TARGET
|
||||
# VM/DISK/LN_TARGET
|
||||
# VM/DISK/SIZE
|
||||
#
|
||||
# These elements are only used to schedule new deployments, so if we
|
||||
# don't add them it will only affect automatic deployment of VMs
|
||||
# recreated (onevm delete --recreate). Manual deployments will still
|
||||
# work without problems.
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
def redo_vm_quotas(doc, where_filter)
|
||||
cpu_limit = "-1"
|
||||
mem_limit = "-1"
|
||||
vms_limit = "-1"
|
||||
vol_limit = "-1"
|
||||
|
||||
doc.root.xpath("VM_QUOTA/VM/CPU").each { |e|
|
||||
cpu_limit = e.text
|
||||
}
|
||||
|
||||
doc.root.xpath("VM_QUOTA/VM/MEMORY").each { |e|
|
||||
mem_limit = e.text
|
||||
}
|
||||
|
||||
doc.root.xpath("VM_QUOTA/VM/VMS").each { |e|
|
||||
vms_limit = e.text
|
||||
}
|
||||
|
||||
doc.root.xpath("VM_QUOTA").each { |e|
|
||||
e.remove
|
||||
}
|
||||
|
||||
vm_quota = doc.root.add_child(doc.create_element("VM_QUOTA"))
|
||||
|
||||
# VM quotas
|
||||
cpu_used = 0
|
||||
mem_used = 0
|
||||
vms_used = 0
|
||||
vol_used = 0
|
||||
|
||||
@db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row|
|
||||
vmdoc = Nokogiri::XML(vm_row[:body])
|
||||
|
||||
# VM quotas
|
||||
vmdoc.root.xpath("TEMPLATE/CPU").each { |e|
|
||||
cpu_used += e.text.to_f
|
||||
}
|
||||
|
||||
vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e|
|
||||
mem_used += e.text.to_i
|
||||
}
|
||||
|
||||
vmdoc.root.xpath("TEMPLATE/DISK").each { |e|
|
||||
type = ""
|
||||
|
||||
e.xpath("TYPE").each { |t_elem|
|
||||
type = t_elem.text.upcase
|
||||
}
|
||||
|
||||
if ( type == "SWAP" || type == "FS")
|
||||
e.xpath("SIZE").each { |size_elem|
|
||||
vol_used += size_elem.text.to_i
|
||||
}
|
||||
end
|
||||
}
|
||||
|
||||
vms_used += 1
|
||||
end
|
||||
|
||||
if (vms_used != 0 ||
|
||||
cpu_limit != "-1" || mem_limit != "-1" || vms_limit != "-1" || vol_limit != "-1" )
|
||||
|
||||
# VM quotas
|
||||
vm_elem = vm_quota.add_child(doc.create_element("VM"))
|
||||
|
||||
vm_elem.add_child(doc.create_element("CPU")).content = cpu_limit
|
||||
vm_elem.add_child(doc.create_element("CPU_USED")).content = sprintf('%.2f', cpu_used)
|
||||
|
||||
vm_elem.add_child(doc.create_element("MEMORY")).content = mem_limit
|
||||
vm_elem.add_child(doc.create_element("MEMORY_USED")).content = mem_used.to_s
|
||||
|
||||
vm_elem.add_child(doc.create_element("VMS")).content = vms_limit
|
||||
vm_elem.add_child(doc.create_element("VMS_USED")).content = vms_used.to_s
|
||||
|
||||
vm_elem.add_child(doc.create_element("VOLATILE_SIZE")).content = vol_limit
|
||||
vm_elem.add_child(doc.create_element("VOLATILE_SIZE_USED")).content = vol_used.to_s
|
||||
end
|
||||
end
|
||||
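# Illustrative only, not part of the migration: for an owner with two active
# (non-DONE) VMs of 1 + 0.5 CPU and 512 + 1024 MB, no volatile disks and no
# explicit limits, redo_vm_quotas leaves a VM_QUOTA element roughly like the
# following (limits keep the "-1" default, usage is recomputed from vm_pool):
#
#   <VM_QUOTA><VM>
#     <CPU>-1</CPU>                     <CPU_USED>1.50</CPU_USED>
#     <MEMORY>-1</MEMORY>               <MEMORY_USED>1536</MEMORY_USED>
#     <VMS>-1</VMS>                     <VMS_USED>2</VMS_USED>
#     <VOLATILE_SIZE>-1</VOLATILE_SIZE> <VOLATILE_SIZE_USED>0</VOLATILE_SIZE_USED>
#   </VM></VM_QUOTA>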
|
||||
def update_history(history_elem)
|
||||
hid = nil
|
||||
|
||||
history_elem.xpath("HID").each do |e|
|
||||
hid = e.text
|
||||
end
|
||||
|
||||
new_elem = history_elem.add_child(
|
||||
history_elem.document.create_element("CID"))
|
||||
|
||||
new_elem.content = "-1" # Cluster None
|
||||
|
||||
if hid.nil?
|
||||
return
|
||||
end
|
||||
|
||||
@db.fetch("SELECT cid FROM host_pool WHERE oid = #{hid}") do |row|
|
||||
new_elem.content = row[:cid].to_s
|
||||
end
|
||||
end
|
||||
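# Illustrative only: update_history back-fills the new CID element on every
# history record. Records without a HID keep CID=-1 (cluster "none");
# otherwise the cluster id is read from the already-migrated host_pool, e.g.
# <HID>3</HID> with host 3 in cluster 100 becomes <CID>100</CID>.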
|
||||
end
|
29
src/onedb/shared/4.4.0_to_4.4.1.rb
Normal file
@ -0,0 +1,29 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"4.4.1"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 4.4.1"
|
||||
end
|
||||
|
||||
def up
|
||||
return true
|
||||
end
|
||||
end
|
152
src/onedb/shared/4.4.1_to_4.5.80.rb
Normal file
@ -0,0 +1,152 @@
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
require 'nokogiri'
|
||||
|
||||
module Migrator
|
||||
def db_version
|
||||
"4.5.80"
|
||||
end
|
||||
|
||||
def one_version
|
||||
"OpenNebula 4.5.80"
|
||||
end
|
||||
|
||||
def up
|
||||
|
||||
init_log_time()
|
||||
|
||||
@db.run "ALTER TABLE acl RENAME TO old_acl;"
|
||||
@db.run "CREATE TABLE acl (oid INT PRIMARY KEY, user BIGINT, resource BIGINT, rights BIGINT, zone BIGINT, UNIQUE(user, resource, rights, zone));"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_acl") do |row|
|
||||
@db[:acl].insert(
|
||||
:oid => row[:oid],
|
||||
:user => row[:user],
|
||||
:resource => row[:resource],
|
||||
:rights => row[:rights],
|
||||
:zone => 4294967296)
|
||||
end
|
||||
end
|
||||
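# Illustrative note, not part of the migration: 4294967296 is 1 << 32, i.e.
# 0x100000000, a single flag bit with the lower 32 ID bits at zero. Following
# the AclRule bitmask convention used elsewhere in this commit, this
# presumably scopes every pre-existing rule to the individual zone #0 (the
# local zone):
#   1 << 32                 #=> 4294967296
#   format("%#x", 1 << 32)  #=> "0x100000000"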
|
||||
@db.run "DROP TABLE old_acl;"
|
||||
|
||||
log_time()
|
||||
|
||||
# Move USER/QUOTA to user_quotas table
|
||||
|
||||
@db.run "ALTER TABLE user_pool RENAME TO old_user_pool;"
|
||||
@db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.run "CREATE TABLE user_quotas (user_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_user_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
quotas_doc = extract_quotas(doc)
|
||||
|
||||
@db[:user_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
|
||||
@db[:user_quotas].insert(
|
||||
:user_oid => row[:oid],
|
||||
:body => quotas_doc.root.to_s)
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_user_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
# GROUP/RESOURCE_PROVIDER is not needed
|
||||
|
||||
# Move GROUP/QUOTA to group_quotas table
|
||||
# Add GROUP/TEMPLATE
|
||||
|
||||
@db.run "ALTER TABLE group_pool RENAME TO old_group_pool;"
|
||||
@db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
|
||||
@db.run "CREATE TABLE group_quotas (group_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);"
|
||||
|
||||
@db.transaction do
|
||||
@db.fetch("SELECT * FROM old_group_pool") do |row|
|
||||
doc = Nokogiri::XML(row[:body])
|
||||
|
||||
quotas_doc = extract_quotas(doc)
|
||||
|
||||
doc.root.add_child(doc.create_element("TEMPLATE"))
|
||||
|
||||
@db[:group_pool].insert(
|
||||
:oid => row[:oid],
|
||||
:name => row[:name],
|
||||
:body => doc.root.to_s,
|
||||
:uid => row[:oid],
|
||||
:gid => row[:gid],
|
||||
:owner_u => row[:owner_u],
|
||||
:group_u => row[:group_u],
|
||||
:other_u => row[:other_u])
|
||||
|
||||
@db[:group_quotas].insert(
|
||||
:group_oid => row[:oid],
|
||||
:body => quotas_doc.root.to_s)
|
||||
end
|
||||
end
|
||||
|
||||
@db.run "DROP TABLE old_group_pool;"
|
||||
|
||||
log_time()
|
||||
|
||||
# Default ZONE
|
||||
@db.run "CREATE TABLE zone_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));"
|
||||
@db.run "INSERT INTO zone_pool VALUES(0,'OpenNebula','<ZONE><ID>0</ID><NAME>OpenNebula</NAME><TEMPLATE><ENDPOINT><![CDATA[-]]></ENDPOINT></TEMPLATE></ZONE>',0,0,1,0,0);"
|
||||
|
||||
@db.run "INSERT INTO pool_control VALUES('zone_pool',99);"
|
||||
|
||||
# New versioning table
|
||||
@db.run "CREATE TABLE local_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256), is_slave BOOLEAN);"
|
||||
@db.run "INSERT INTO local_db_versioning VALUES(0,'#{db_version()}',#{Time.now.to_i},'Database migrated from 4.4.1 to 4.5.80 (OpenNebula 4.5.80) by onedb command.',0);"
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
def extract_quotas(doc)
|
||||
ds_quota = doc.root.at_xpath("DATASTORE_QUOTA").remove
|
||||
net_quota = doc.root.at_xpath("NETWORK_QUOTA").remove
|
||||
vm_quota = doc.root.at_xpath("VM_QUOTA").remove
|
||||
img_quota = doc.root.at_xpath("IMAGE_QUOTA").remove
|
||||
|
||||
quotas_doc = Nokogiri::XML("<QUOTAS></QUOTAS>")
|
||||
|
||||
quotas_doc.root.add_child(quotas_doc.create_element("ID"))
|
||||
.content = doc.root.at_xpath("ID").text
|
||||
|
||||
quotas_doc.root.add_child(ds_quota)
|
||||
quotas_doc.root.add_child(net_quota)
|
||||
quotas_doc.root.add_child(vm_quota)
|
||||
quotas_doc.root.add_child(img_quota)
|
||||
|
||||
return quotas_doc
|
||||
end
|
||||
end
|
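The extract_quotas helper above is what splits each user and group body in two: the four quota subtrees are moved into a standalone <QUOTAS> document, keyed by the object ID and destined for the new user_quotas/group_quotas tables, while the stripped body goes back into the pool table. A rough usage sketch, assuming the helper is callable standalone and using a made-up minimal body:

require 'nokogiri'

body = "<USER><ID>2</ID><NAME>alice</NAME>" \
       "<DATASTORE_QUOTA/><NETWORK_QUOTA/><VM_QUOTA/><IMAGE_QUOTA/></USER>"

doc        = Nokogiri::XML(body)
quotas_doc = extract_quotas(doc)

quotas_doc.root.to_s  #=> "<QUOTAS><ID>2</ID><DATASTORE_QUOTA/>...<IMAGE_QUOTA/></QUOTAS>"
doc.root.to_s         #=> "<USER><ID>2</ID><NAME>alice</NAME></USER>" (quota subtrees removed)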
@ -437,6 +437,7 @@ void RequestManager::register_xml_methods()
|
||||
/* Group related methods */
|
||||
|
||||
xmlrpc_c::method * group_allocate_pt;
|
||||
xmlrpc_c::method * group_update_pt;
|
||||
xmlrpc_c::method * group_delete_pt;
|
||||
xmlrpc_c::method * group_add_provider_pt;
|
||||
xmlrpc_c::method * group_del_provider_pt;
|
||||
@ -447,6 +448,7 @@ void RequestManager::register_xml_methods()
|
||||
group_delete_pt = new RequestManagerProxy("one.group.delete");
|
||||
group_add_provider_pt = new RequestManagerProxy("one.group.addprovider");
|
||||
group_del_provider_pt = new RequestManagerProxy("one.group.delprovider");
|
||||
group_update_pt = new RequestManagerProxy("one.group.update");
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -454,12 +456,14 @@ void RequestManager::register_xml_methods()
|
||||
group_delete_pt = new GroupDelete();
|
||||
group_add_provider_pt = new GroupAddProvider();
|
||||
group_del_provider_pt = new GroupDelProvider();
|
||||
group_update_pt = new GroupUpdateTemplate();
|
||||
}
|
||||
|
||||
xmlrpc_c::methodPtr group_allocate(group_allocate_pt);
|
||||
xmlrpc_c::methodPtr group_delete(group_delete_pt);
|
||||
xmlrpc_c::methodPtr group_add_provider(group_add_provider_pt);
|
||||
xmlrpc_c::methodPtr group_del_provider(group_del_provider_pt);
|
||||
xmlrpc_c::methodPtr group_update(group_update_pt);
|
||||
|
||||
xmlrpc_c::methodPtr group_info(new GroupInfo());
|
||||
xmlrpc_c::methodPtr group_set_quota(new GroupSetQuota());
|
||||
@ -467,12 +471,13 @@ void RequestManager::register_xml_methods()
|
||||
xmlrpc_c::methodPtr group_get_default_quota(new GroupQuotaInfo());
|
||||
xmlrpc_c::methodPtr group_set_default_quota(new GroupQuotaUpdate());
|
||||
|
||||
RequestManagerRegistry.addMethod("one.group.allocate", group_allocate);
|
||||
RequestManagerRegistry.addMethod("one.group.delete", group_delete);
|
||||
RequestManagerRegistry.addMethod("one.group.info", group_info);
|
||||
RequestManagerRegistry.addMethod("one.group.quota", group_set_quota);
|
||||
RequestManagerRegistry.addMethod("one.group.allocate", group_allocate);
|
||||
RequestManagerRegistry.addMethod("one.group.delete", group_delete);
|
||||
RequestManagerRegistry.addMethod("one.group.info", group_info);
|
||||
RequestManagerRegistry.addMethod("one.group.quota", group_set_quota);
|
||||
RequestManagerRegistry.addMethod("one.group.addprovider",group_add_provider);
|
||||
RequestManagerRegistry.addMethod("one.group.delprovider",group_del_provider);
|
||||
RequestManagerRegistry.addMethod("one.group.update", group_update);
|
||||
|
||||
RequestManagerRegistry.addMethod("one.grouppool.info", grouppool_info);
|
||||
|
||||
|
@ -302,3 +302,18 @@ int UserDelete::drop(int oid, PoolObjectSQL * object, string& error_msg)
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
int ZoneDelete::drop(int oid, PoolObjectSQL * object, string& error_msg)
|
||||
{
|
||||
int rc = RequestManagerDelete::drop(oid, object, error_msg);
|
||||
|
||||
if ( rc == 0 )
|
||||
{
|
||||
aclm->del_zid_rules(oid);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -197,8 +197,6 @@ void GroupEditProvider::request_execute(
|
||||
return;
|
||||
}
|
||||
|
||||
edit_acl_rules(group_id, zone_id, cluster_id, error_str);
|
||||
|
||||
success_response(cluster_id, att);
|
||||
}
|
||||
|
||||
@ -214,154 +212,8 @@ int GroupAddProvider::edit_resource_provider(
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int GroupAddProvider::edit_acl_rules(
|
||||
int group_id, int zone_id, int cluster_id, string& error_msg)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
long long mask_prefix;
|
||||
|
||||
if (cluster_id == ClusterPool::ALL_RESOURCES)
|
||||
{
|
||||
mask_prefix = AclRule::ALL_ID;
|
||||
}
|
||||
else
|
||||
{
|
||||
mask_prefix = AclRule::CLUSTER_ID | cluster_id;
|
||||
}
|
||||
|
||||
// @<gid> HOST/%<cid> MANAGE #<zone>
|
||||
rc += aclm->add_rule(
|
||||
AclRule::GROUP_ID |
|
||||
group_id,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::HOST,
|
||||
|
||||
AuthRequest::MANAGE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
// @<gid> DATASTORE+NET/%<cid> USE #<zone>
|
||||
rc += aclm->add_rule(
|
||||
AclRule::GROUP_ID |
|
||||
group_id,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::DATASTORE |
|
||||
PoolObjectSQL::NET,
|
||||
|
||||
AuthRequest::USE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
// @<gid> ZONE/#<zone> USE *
|
||||
rc += aclm->add_rule(
|
||||
AclRule::GROUP_ID |
|
||||
group_id,
|
||||
|
||||
PoolObjectSQL::ZONE |
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
AuthRequest::USE,
|
||||
|
||||
AclRule::ALL_ID,
|
||||
|
||||
error_msg);
|
||||
|
||||
if (rc != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int GroupDelProvider::edit_resource_provider(
|
||||
Group* group, int zone_id, int cluster_id, string& error_msg)
|
||||
{
|
||||
return group->del_resource_provider(zone_id, cluster_id, error_msg);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int GroupDelProvider::edit_acl_rules(
|
||||
int group_id, int zone_id, int cluster_id, string& error_msg)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
long long mask_prefix;
|
||||
|
||||
if (cluster_id == ClusterPool::ALL_RESOURCES)
|
||||
{
|
||||
mask_prefix = AclRule::ALL_ID;
|
||||
}
|
||||
else
|
||||
{
|
||||
mask_prefix = AclRule::CLUSTER_ID | cluster_id;
|
||||
}
|
||||
|
||||
// @<gid> HOST/%<cid> MANAGE #<zid>
|
||||
rc += aclm->del_rule(
|
||||
AclRule::GROUP_ID |
|
||||
group_id,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::HOST,
|
||||
|
||||
AuthRequest::MANAGE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
// @<gid> DATASTORE+NET/%<cid> USE #<zid>
|
||||
rc += aclm->del_rule(
|
||||
AclRule::GROUP_ID |
|
||||
group_id,
|
||||
|
||||
mask_prefix |
|
||||
PoolObjectSQL::DATASTORE |
|
||||
PoolObjectSQL::NET,
|
||||
|
||||
AuthRequest::USE,
|
||||
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
error_msg);
|
||||
|
||||
// @<gid> ZONE/#<zone> USE *
|
||||
rc += aclm->del_rule(
|
||||
AclRule::GROUP_ID |
|
||||
group_id,
|
||||
|
||||
PoolObjectSQL::ZONE |
|
||||
AclRule::INDIVIDUAL_ID |
|
||||
zone_id,
|
||||
|
||||
AuthRequest::USE,
|
||||
|
||||
AclRule::ALL_ID,
|
||||
|
||||
error_msg);
|
||||
|
||||
if (rc != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.