diff --git a/include/AclManager.h b/include/AclManager.h index 6a25884967..c870e6a7fb 100644 --- a/include/AclManager.h +++ b/include/AclManager.h @@ -38,16 +38,13 @@ class AclManager : public Callbackable, public ActionListener public: /** - * * @param _db pointer to the DB * @param zone_id of the Zone - * @param is_federation_enabled true is this oned is part of a federation - * @param is_federation_slave true is this oned is a federation slave. It - * it is true, it will reload periodically rules from the DB + * @param is_federation_slave true is this oned is a federation slave. If + * it is true, it will reload periodically rules from the DB * @param timer_period period to reload the rules */ - AclManager(SqlDB * _db, int zone_id, bool is_federation_enabled, - bool is_federation_slave, time_t timer); + AclManager(SqlDB * _db, int zone_id, bool is_federation_slave, time_t timer); virtual ~AclManager(); @@ -145,6 +142,13 @@ public: */ void del_cid_rules(int cid); + /** + * Deletes rules that apply to this cluster id + * + * @param zid The zone id + */ + void del_zid_rules(int zid); + /** * Deletes all rules that apply to this resource * @@ -214,8 +218,7 @@ protected: * from DB) */ AclManager(int _zone_id) - :zone_id(_zone_id), db(0),lastOID(0), is_federation_enabled(false), - is_federation_slave(false) + :zone_id(_zone_id), db(0),lastOID(0), is_federation_slave(false) { pthread_mutex_init(&mutex, 0); }; @@ -312,6 +315,13 @@ private: long long resource_req, long long resource_mask); + /** + * Deletes all rules that match the zone mask + * + * @param zone_req Mask to match + */ + void del_zone_matching_rules(long long zone_req); + // ---------------------------------------- // Local zone // ---------------------------------------- @@ -420,11 +430,6 @@ private: // Refresh loop thread // ---------------------------------------- - /** - * Flag to know if this oned is part of a federation - */ - bool is_federation_enabled; - /** * Flag to refresh the cache periodically 
*/ diff --git a/include/Cluster.h b/include/Cluster.h index 3513a56819..0f4ad2261b 100644 --- a/include/Cluster.h +++ b/include/Cluster.h @@ -170,6 +170,19 @@ public: return vnets.get_collection_copy(); } + /** + * Get the default reserved capacity for hosts in the cluster. It can be + * overridden if defined in the host template. + * @param cpu reserved cpu (in percentage) + * @param mem reserved mem (in KB) + */ + void get_reserved_capacity(long long &cpu, long long& mem) + { + get_template_attribute("RESERVED_CPU", cpu); + + get_template_attribute("RESERVED_MEM", mem); + } + // ************************************************************************* // DataBase implementation (Public) // ************************************************************************* diff --git a/include/Group.h b/include/Group.h index 4e0e26de21..febd6edc7c 100644 --- a/include/Group.h +++ b/include/Group.h @@ -21,6 +21,7 @@ #include "ObjectCollection.h" #include "User.h" #include "QuotasSQL.h" +#include "Template.h" using namespace std; @@ -109,6 +110,14 @@ public: return quota.update(oid, db); }; + /** + * Factory method for Group templates + */ + Template * get_new_template() const + { + return new Template; + } + private: // ------------------------------------------------------------------------- @@ -128,9 +137,14 @@ private: { // Allow users in this group to see it group_u = 1; + + obj_template = new Template; }; - virtual ~Group(){}; + virtual ~Group() + { + delete obj_template; + }; // ************************************************************************* // Attributes (Private) diff --git a/include/Host.h b/include/Host.h index 35261e3f65..08bb75a6da 100644 --- a/include/Host.h +++ b/include/Host.h @@ -157,13 +157,17 @@ public: * @param with_vm_info if monitoring contains VM information * @param lost set of VMs that should be in the host and were not found * @param found VMs running in the host (as expected) and info. 
+ * @param reserved_cpu from cluster defaults + * @param reserved_mem from cluster defaults * @return 0 on success **/ int update_info(Template &tmpl, bool &with_vm_info, set &lost, map &found, - const set &non_shared_ds); + const set &non_shared_ds, + long long reserved_cpu, + long long reserved_mem); /** * Extracts the DS attributes from the given template * @param parse_str string with values to be parsed @@ -268,11 +272,33 @@ public: return last_monitored; }; - // ------------------------------------------------------------------------ + /** + * Get the reserved capacity for this host. Parameters will be only updated + * if values are defined in the host. Reserved capacity will be subtracted + * from the Host total capacity. + * @param cpu reserved cpu (in percentage) + * @param mem reserved mem (in KB) + */ + void get_reserved_capacity(long long &cpu, long long& mem) + { + long long tcpu; + long long tmem; + + if (get_template_attribute("RESERVED_CPU", tcpu)) + { + cpu = tcpu; + } + + if (get_template_attribute("RESERVED_MEM", tmem)) + { + mem = tmem; + } + } + + // ------------------------------------------------------------------------- // Share functions. 
Returns the value associated with each host share // metric - // ------------------------------------------------------------------------ - + // ------------------------------------------------------------------------- long long get_share_running_vms() { return host_share.running_vms; diff --git a/include/ImagePool.h b/include/ImagePool.h index 234db68bd7..bc51791fb2 100644 --- a/include/ImagePool.h +++ b/include/ImagePool.h @@ -43,6 +43,7 @@ public: SqlDB * db, const string& __default_type, const string& __default_dev_prefix, + const string& __default_cdrom_dev_prefix, vector& restricted_attrs, vector hook_mads, const string& remotes_location, diff --git a/include/LifeCycleManager.h b/include/LifeCycleManager.h index d71eeab413..40999420de 100644 --- a/include/LifeCycleManager.h +++ b/include/LifeCycleManager.h @@ -207,7 +207,7 @@ private: void attach_success_action(int vid); - void attach_failure_action(int vid); + void attach_failure_action(int vid, bool release_save_as); void detach_success_action(int vid); diff --git a/include/MonitorThread.h b/include/MonitorThread.h index c8911d87f7..00d3145963 100644 --- a/include/MonitorThread.h +++ b/include/MonitorThread.h @@ -22,6 +22,7 @@ #include class HostPool; +class ClusterPool; class DatastorePool; class LifeCycleManager; @@ -53,6 +54,8 @@ private: // Pointers shared by all the MonitorThreads, init by MonitorThreadPool static HostPool * hpool; + static ClusterPool *cpool; + static DatastorePool * dspool; static LifeCycleManager *lcm; diff --git a/include/Nebula.h b/include/Nebula.h index abe37673a9..130b836de2 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -363,10 +363,32 @@ public: */ static string version() { - return "OpenNebula 4.5.0"; + return "OpenNebula " + code_version(); }; - static string db_version() + /** + * Returns the version of oned + * @return + */ + static string code_version() + { + return "4.5.80"; // bump version + } + + /** + * Version needed for the DB, shared tables + * @return + 
*/ + static string shared_db_version() + { + return "4.5.0"; + } + + /** + * Version needed for the DB, local tables + * @return + */ + static string local_db_version() { return "4.5.0"; } diff --git a/include/RequestManagerDelete.h b/include/RequestManagerDelete.h index 2a387708a4..f5db3ee36c 100644 --- a/include/RequestManagerDelete.h +++ b/include/RequestManagerDelete.h @@ -310,6 +310,8 @@ public: }; ~ZoneDelete(){}; + + int drop(int oid, PoolObjectSQL * object, string& error_msg); }; /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerGroup.h b/include/RequestManagerGroup.h index f4e8eb752e..814595d358 100644 --- a/include/RequestManagerGroup.h +++ b/include/RequestManagerGroup.h @@ -104,9 +104,6 @@ protected: virtual int edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg) = 0; - - virtual int edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg) = 0; }; /* ------------------------------------------------------------------------- */ @@ -125,9 +122,6 @@ public: int edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg); - - int edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg); }; /* ------------------------------------------------------------------------- */ @@ -146,9 +140,6 @@ public: int edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg); - - int edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg); }; /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerUpdateTemplate.h b/include/RequestManagerUpdateTemplate.h index c82ac89a16..669f20d266 100644 --- a/include/RequestManagerUpdateTemplate.h +++ b/include/RequestManagerUpdateTemplate.h @@ -239,6 +239,24 @@ public: ~ZoneUpdateTemplate(){}; }; +/* 
------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class GroupUpdateTemplate : public RequestManagerUpdateTemplate +{ +public: + GroupUpdateTemplate(): + RequestManagerUpdateTemplate("GroupUpdateTemplate", + "Updates a Group template") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_gpool(); + auth_object = PoolObjectSQL::GROUP; + }; + + ~GroupUpdateTemplate(){}; +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/SystemDB.h b/include/SystemDB.h index 979bb19d16..06002cc71d 100644 --- a/include/SystemDB.h +++ b/include/SystemDB.h @@ -80,18 +80,18 @@ private: static const char * pc_table; // DB versioning table - static const char * ver_names; + static const char * shared_ver_names; - static const char * ver_bootstrap; + static const char * shared_ver_bootstrap; - static const char * ver_table; + static const char * shared_ver_table; // DB slave versioning table - static const char * slave_ver_names; + static const char * local_ver_names; - static const char * slave_ver_bootstrap; + static const char * local_ver_bootstrap; - static const char * slave_ver_table; + static const char * local_ver_table; // System attributes table static const char * sys_names; @@ -115,18 +115,18 @@ private: bool replace, string& error_str); /** - * Bootstraps the database control tables + * Bootstraps the database control tables for shared tables * * @return 0 on success */ - int bootstrap(); + int shared_bootstrap(); /** - * Bootstraps the database control tables for a slave DB + * Bootstraps the database control tables for a local DB tables * * @return 0 on success */ - int slave_bootstrap(); + int local_bootstrap(); /** * Callback function for the 
check_db_version method. Stores the read @@ -151,13 +151,23 @@ private: /** * Reads the current DB version. * @param is_federation_slave + * @param local_bs boostrap local DB tables + * @param shared_bs boostrap shared DB tables * * @return 0 on success, - * -1 if there is a version mismatch, - * -2 if the DB needs a bootstrap from the master - * -3 if the DB needs a bootstrap from the slave + * -1 if there is a version mismatch, or replica config error. */ - int check_db_version(bool is_federation_slave); + int check_db_version(bool is_slave, bool& local_bs, bool& shared_bs); + + /** + * check_db_version to check versioning + * @param table name of the DB table + * @param version target DB version + * @return 0 success, -1 upgrade needed, -2 bootstrap needed + */ + int check_db_version(const string& table, + const string& version, + string& error); }; #endif //SYSTEM_DB_H diff --git a/include/User.h b/include/User.h index 58d170f3a5..28df797816 100644 --- a/include/User.h +++ b/include/User.h @@ -112,22 +112,7 @@ public: * @param error_str Returns the error reason, if any * @returns -1 if the password is not valid */ - int set_password(const string& passwd, string& error_str) - { - int rc = 0; - - if (pass_is_valid(passwd, error_str)) - { - password = passwd; - invalidate_session(); - } - else - { - rc = -1; - } - - return rc; - }; + int set_password(const string& passwd, string& error_str); /** * Returns user password diff --git a/include/UserTemplate.h b/include/UserTemplate.h index 2eb92ad639..26d7151f89 100644 --- a/include/UserTemplate.h +++ b/include/UserTemplate.h @@ -35,4 +35,4 @@ public: /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -#endif /*IMAGE_TEMPLATE_H_*/ +#endif /*USER_TEMPLATE_H_*/ diff --git a/include/Zone.h b/include/Zone.h index 85a35b9442..5b39a011e0 100644 --- a/include/Zone.h +++ b/include/Zone.h @@ -1,5 +1,5 @@ /* 
------------------------------------------------------------------------ */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ diff --git a/include/ZonePool.h b/include/ZonePool.h index caa0e8de3c..9d9c811437 100644 --- a/include/ZonePool.h +++ b/include/ZonePool.h @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------------- */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ diff --git a/install.sh b/install.sh index ba5d05efea..bb9195592e 100755 --- a/install.sh +++ b/install.sh @@ -227,6 +227,8 @@ LIB_DIRS="$LIB_LOCATION/ruby \ $LIB_LOCATION/ruby/cloud/marketplace \ $LIB_LOCATION/ruby/cloud/CloudAuth \ $LIB_LOCATION/ruby/onedb \ + $LIB_LOCATION/ruby/onedb/shared \ + $LIB_LOCATION/ruby/onedb/local \ $LIB_LOCATION/ruby/vendors \ $LIB_LOCATION/ruby/vendors/rbvmomi \ $LIB_LOCATION/ruby/vendors/rbvmomi/lib \ @@ -394,7 +396,9 @@ INSTALL_FILES=( MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes MAD_SH_LIB_FILES:$LIB_LOCATION/sh MAD_SH_LIB_FILES:$VAR_LOCATION/remotes - ONEDB_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb + ONEDB_FILES:$LIB_LOCATION/ruby/onedb + ONEDB_SHARED_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/shared + ONEDB_LOCAL_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/local MADS_LIB_FILES:$LIB_LOCATION/mads IM_PROBES_FILES:$VAR_LOCATION/remotes/im IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d @@ -795,7 +799,8 @@ IM_PROBES_KVM_PROBES_FILES="src/im_mad/remotes/kvm-probes.d/kvm.rb \ src/im_mad/remotes/kvm-probes.d/poll.sh \ 
src/im_mad/remotes/kvm-probes.d/name.sh \ src/im_mad/remotes/common.d/monitor_ds.sh \ - src/im_mad/remotes/common.d/version.sh" + src/im_mad/remotes/common.d/version.sh \ + src/im_mad/remotes/common.d/collectd-client-shepherd.sh" IM_PROBES_XEN3_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \ src/im_mad/remotes/xen.d/collectd-client.rb" @@ -806,7 +811,8 @@ IM_PROBES_XEN3_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \ src/im_mad/remotes/xen-probes.d/poll3.sh \ src/im_mad/remotes/xen-probes.d/name.sh src/im_mad/remotes/common.d/monitor_ds.sh \ - src/im_mad/remotes/common.d/version.sh" + src/im_mad/remotes/common.d/version.sh \ + src/im_mad/remotes/common.d/collectd-client-shepherd.sh" IM_PROBES_XEN4_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \ src/im_mad/remotes/xen.d/collectd-client.rb" @@ -817,7 +823,8 @@ IM_PROBES_XEN4_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \ src/im_mad/remotes/xen-probes.d/poll4.sh \ src/im_mad/remotes/xen-probes.d/name.sh \ src/im_mad/remotes/common.d/monitor_ds.sh \ - src/im_mad/remotes/common.d/version.sh" + src/im_mad/remotes/common.d/version.sh \ + src/im_mad/remotes/common.d/collectd-client-shepherd.sh" IM_PROBES_VMWARE_FILES="src/im_mad/remotes/vmware.d/vmware.rb" @@ -1038,41 +1045,48 @@ DATASTORE_DRIVER_CEPH_SCRIPTS="src/datastore_mad/remotes/ceph/cp \ #------------------------------------------------------------------------------- # Migration scripts for onedb command, to be installed under $LIB_LOCATION #------------------------------------------------------------------------------- -ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \ - src/onedb/2.9.80_to_2.9.85.rb \ - src/onedb/2.9.85_to_2.9.90.rb \ - src/onedb/2.9.90_to_3.0.0.rb \ - src/onedb/3.0.0_to_3.1.0.rb \ - src/onedb/3.1.0_to_3.1.80.rb \ - src/onedb/3.1.80_to_3.2.0.rb \ - src/onedb/3.2.0_to_3.2.1.rb \ - src/onedb/3.2.1_to_3.3.0.rb \ - src/onedb/3.3.0_to_3.3.80.rb \ - src/onedb/3.3.80_to_3.4.0.rb \ - src/onedb/3.4.0_to_3.4.1.rb \ 
- src/onedb/3.4.1_to_3.5.80.rb \ - src/onedb/3.5.80_to_3.6.0.rb \ - src/onedb/3.6.0_to_3.7.80.rb \ - src/onedb/3.7.80_to_3.8.0.rb \ - src/onedb/3.8.0_to_3.8.1.rb \ - src/onedb/3.8.1_to_3.8.2.rb \ - src/onedb/3.8.2_to_3.8.3.rb \ - src/onedb/3.8.3_to_3.8.4.rb \ - src/onedb/3.8.4_to_3.8.5.rb \ - src/onedb/3.8.5_to_3.9.80.rb \ - src/onedb/3.9.80_to_3.9.90.rb \ - src/onedb/3.9.90_to_4.0.0.rb \ - src/onedb/4.0.0_to_4.0.1.rb \ - src/onedb/4.0.1_to_4.1.80.rb \ - src/onedb/4.1.80_to_4.2.0.rb \ - src/onedb/4.2.0_to_4.3.80.rb \ - src/onedb/4.3.80_to_4.3.85.rb \ - src/onedb/4.3.85_to_4.3.90.rb \ - src/onedb/4.3.90_to_4.4.0.rb \ - src/onedb/fsck.rb \ - src/onedb/import_slave.rb \ - src/onedb/onedb.rb \ - src/onedb/onedb_backend.rb" + + +ONEDB_FILES="src/onedb/fsck.rb \ + src/onedb/import_slave.rb \ + src/onedb/onedb.rb \ + src/onedb/onedb_backend.rb" + +ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \ + src/onedb/shared/2.9.80_to_2.9.85.rb \ + src/onedb/shared/2.9.85_to_2.9.90.rb \ + src/onedb/shared/2.9.90_to_3.0.0.rb \ + src/onedb/shared/3.0.0_to_3.1.0.rb \ + src/onedb/shared/3.1.0_to_3.1.80.rb \ + src/onedb/shared/3.1.80_to_3.2.0.rb \ + src/onedb/shared/3.2.0_to_3.2.1.rb \ + src/onedb/shared/3.2.1_to_3.3.0.rb \ + src/onedb/shared/3.3.0_to_3.3.80.rb \ + src/onedb/shared/3.3.80_to_3.4.0.rb \ + src/onedb/shared/3.4.0_to_3.4.1.rb \ + src/onedb/shared/3.4.1_to_3.5.80.rb \ + src/onedb/shared/3.5.80_to_3.6.0.rb \ + src/onedb/shared/3.6.0_to_3.7.80.rb \ + src/onedb/shared/3.7.80_to_3.8.0.rb \ + src/onedb/shared/3.8.0_to_3.8.1.rb \ + src/onedb/shared/3.8.1_to_3.8.2.rb \ + src/onedb/shared/3.8.2_to_3.8.3.rb \ + src/onedb/shared/3.8.3_to_3.8.4.rb \ + src/onedb/shared/3.8.4_to_3.8.5.rb \ + src/onedb/shared/3.8.5_to_3.9.80.rb \ + src/onedb/shared/3.9.80_to_3.9.90.rb \ + src/onedb/shared/3.9.90_to_4.0.0.rb \ + src/onedb/shared/4.0.0_to_4.0.1.rb \ + src/onedb/shared/4.0.1_to_4.1.80.rb \ + src/onedb/shared/4.1.80_to_4.2.0.rb \ + src/onedb/shared/4.2.0_to_4.3.80.rb \ + 
src/onedb/shared/4.3.80_to_4.3.85.rb \ + src/onedb/shared/4.3.85_to_4.3.90.rb \ + src/onedb/shared/4.3.90_to_4.4.0.rb \ + src/onedb/shared/4.4.0_to_4.4.1.rb \ + src/onedb/shared/4.4.1_to_4.5.80.rb" + +ONEDB_LOCAL_MIGRATOR_FILES="" #------------------------------------------------------------------------------- # Configuration files for OpenNebula, to be installed under $ETC_LOCATION diff --git a/share/etc/oned.conf b/share/etc/oned.conf index f04b5ee725..88612ca436 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -176,11 +176,14 @@ MAC_PREFIX = "02:00" # CDROM Image file holding a CDROM # DATABLOCK Image file holding a datablock, # always created as an empty block +# # DEFAULT_DEVICE_PREFIX: This can be set to # hd IDE prefix # sd SCSI # xvd XEN Virtual Disk # vd KVM virtual disk +# +# DEFAULT_CDROM_DEVICE_PREFIX: Same as above but for CDROM devices. #******************************************************************************* #DATASTORE_LOCATION = /var/lib/one/datastores @@ -192,6 +195,7 @@ DATASTORE_CAPACITY_CHECK = "yes" DEFAULT_IMAGE_TYPE = "OS" DEFAULT_DEVICE_PREFIX = "hd" +DEFAULT_CDROM_DEVICE_PREFIX = "hd" #******************************************************************************* # Information Driver Configuration @@ -689,6 +693,7 @@ IMAGE_RESTRICTED_ATTR = "SOURCE" INHERIT_DATASTORE_ATTR = "CEPH_HOST" INHERIT_DATASTORE_ATTR = "CEPH_SECRET" INHERIT_DATASTORE_ATTR = "CEPH_USER" +INHERIT_DATASTORE_ATTR = "RBD_FORMAT" INHERIT_DATASTORE_ATTR = "GLUSTER_HOST" INHERIT_DATASTORE_ATTR = "GLUSTER_VOLUME" diff --git a/share/rubygems/generate b/share/rubygems/generate index af1520fb93..69de4fabdb 100755 --- a/share/rubygems/generate +++ b/share/rubygems/generate @@ -21,7 +21,7 @@ require 'tmpdir' DEFAULTS={ - :version => "4.5.0", + :version => "4.5.80", :date => Time.now.strftime("%Y-%m-%d"), :dependencies => [] } diff --git a/share/scripts/context-packages/generate.sh b/share/scripts/context-packages/generate.sh index a114c397bb..703b302ad2 
100755 --- a/share/scripts/context-packages/generate.sh +++ b/share/scripts/context-packages/generate.sh @@ -16,7 +16,7 @@ # limitations under the License. # #--------------------------------------------------------------------------- # -VERSION=${VERSION:-4.5.0} +VERSION=${VERSION:-4.5.80} MAINTAINER=${MAINTAINER:-C12G Labs } LICENSE=${LICENSE:-Apache 2.0} PACKAGE_NAME=${PACKAGE_NAME:-one-context} diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index 6a647b422f..6d0c07659d 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -51,11 +51,9 @@ int AclManager::init_cb(void *nil, int num, char **values, char **names) AclManager::AclManager( SqlDB * _db, int _zone_id, - bool _is_federation_enabled, bool _is_federation_slave, time_t _timer_period) :zone_id(_zone_id), db(_db), lastOID(-1), - is_federation_enabled(_is_federation_enabled), is_federation_slave(_is_federation_slave), timer_period(_timer_period) { ostringstream oss; @@ -85,32 +83,25 @@ AclManager::AclManager( string error_str; // Users in group USERS can create standard resources - // @1 VM+NET+IMAGE+TEMPLATE/* CREATE + // @1 VM+NET+IMAGE+TEMPLATE+DOCUMENT/* CREATE # add_rule(AclRule::GROUP_ID | 1, AclRule::ALL_ID | PoolObjectSQL::VM | PoolObjectSQL::NET | PoolObjectSQL::IMAGE | - PoolObjectSQL::TEMPLATE, - AuthRequest::CREATE, - AclRule::ALL_ID, - error_str); - - // Users in USERS can deploy VMs in any HOST - // @1 HOST/* MANAGE - add_rule(AclRule::GROUP_ID | - 1, - AclRule::ALL_ID | - PoolObjectSQL::HOST, - AuthRequest::MANAGE, - AclRule::ALL_ID, - error_str); - - add_rule(AclRule::ALL_ID, - AclRule::ALL_ID | + PoolObjectSQL::TEMPLATE | PoolObjectSQL::DOCUMENT, AuthRequest::CREATE, + AclRule::INDIVIDUAL_ID | + zone_id, + error_str); + + // * ZONE/* USE * + add_rule(AclRule::ALL_ID, + AclRule::ALL_ID | + PoolObjectSQL::ZONE, + AuthRequest::USE, AclRule::ALL_ID, error_str); } @@ -529,11 +520,6 @@ int AclManager::add_rule(long long user, long long resource, long long rights, return -1; } 
- if (!is_federation_enabled) - { - zone = AclRule::INDIVIDUAL_ID | zone_id; - } - lock(); if (lastOID == INT_MAX) @@ -799,6 +785,18 @@ void AclManager::del_cid_rules(int cid) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +void AclManager::del_zid_rules(int zid) +{ + long long request = AclRule::INDIVIDUAL_ID | zid; + + // Delete rules that match + // __ __/__ __ #zid + del_zone_matching_rules(request); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + void AclManager::del_resource_rules(int oid, PoolObjectSQL::ObjectType obj_type) { long long request = obj_type | @@ -876,6 +874,35 @@ void AclManager::del_resource_matching_rules(long long resource_req, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +void AclManager::del_zone_matching_rules(long long zone_req) +{ + multimap::iterator it; + + vector oids; + vector::iterator oid_it; + string error_str; + + lock(); + + for ( it = acl_rules.begin(); it != acl_rules.end(); it++ ) + { + if ( it->second->zone == zone_req ) + { + oids.push_back(it->second->oid); + } + } + + unlock(); + + for ( oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ ) + { + del_rule(*oid_it, error_str); + } +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + void AclManager::reverse_search(int uid, const set& user_groups, PoolObjectSQL::ObjectType obj_type, diff --git a/src/cli/cli_helper.rb b/src/cli/cli_helper.rb index 30e7116320..5cdb3f5bbf 100644 --- a/src/cli/cli_helper.rb +++ b/src/cli/cli_helper.rb @@ -14,6 +14,8 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # +require 'csv' + module CLIHelper LIST = { :name => "list", @@ -23,6 +25,12 @@ module CLIHelper :description => "Selects columns to display with list command" } + CSV = { + :name => "csv", + :large => "--csv", + :description => "Write table in csv format" + } + #ORDER = { # :name => "order", # :short => "-o x,y,z", @@ -56,7 +64,7 @@ module CLIHelper } #OPTIONS = [LIST, ORDER, FILTER, HEADER, DELAY] - OPTIONS = [LIST, DELAY, FILTER] + OPTIONS = [LIST, DELAY, FILTER, CSV] # Sets bold font def CLIHelper.scr_bold @@ -154,6 +162,8 @@ module CLIHelper class ShowTable require 'yaml' + attr_reader :default_columns + def initialize(conf=nil, ext=nil, &block) @columns = Hash.new @default_columns = Array.new @@ -241,7 +251,7 @@ module CLIHelper private def print_table(data, options) - CLIHelper.print_header(header_str) + CLIHelper.print_header(header_str) if !options[:csv] data ? print_data(data, options) : puts end @@ -257,17 +267,24 @@ module CLIHelper end begin - res_data.each{|l| - puts (0..ncolumns-1).collect{ |i| - dat=l[i] - col=@default_columns[i] + if options[:csv] + CSV($stdout, :write_headers => true, + :headers => @default_columns) do |csv| + res_data.each {|l| csv << l } + end + else + res_data.each{|l| + puts (0..ncolumns-1).collect{ |i| + dat=l[i] + col=@default_columns[i] - str=format_str(col, dat) - str=CLIHelper.color_state(str) if i==stat_column + str=format_str(col, dat) + str=CLIHelper.color_state(str) if i==stat_column - str - }.join(' ').rstrip - } + str + }.join(' ').rstrip + } + end rescue Errno::EPIPE end end diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb index 43fbc34a9a..57929b6f3c 100644 --- a/src/cli/one_helper.rb +++ b/src/cli/one_helper.rb @@ -100,19 +100,31 @@ EOT :name => 'user', :large => '--user name', :description => 'User name used to connect to OpenNebula', - :format => String + :format => String, + :proc => lambda do |o, options| + 
OneHelper.set_user(o) + [0, o] + end }, { :name => 'password', :large => '--password password', :description => 'Password to authenticate with OpenNebula', - :format => String + :format => String, + :proc => lambda do |o, options| + OneHelper.set_password(o) + [0, o] + end }, { :name => 'endpoint', :large => '--endpoint endpoint', :description => 'URL of OpenNebula xmlrpc frontend', - :format => String + :format => String, + :proc => lambda do |o, options| + OneHelper.set_endpoint(o) + [0, o] + end } ] @@ -340,18 +352,31 @@ EOT class OneHelper attr_accessor :client - def self.get_client(options) - if defined?(@@client) + def self.get_client(options={}, force=false) + if !force && defined?(@@client) @@client else + secret=nil - user=options[:user] + password=nil + + if defined?(@@user) + user=@@user + password=@@password if defined?(@@password) + else + user=options[:user] + end + if user - password=options[:password]||self.get_password + password=password||options[:password]||self.get_password secret="#{user}:#{password}" end - endpoint=options[:endpoint] + if defined?(@@endpoint) + endpoint=@@endpoint + else + endpoint=options[:endpoint] + end @@client=OpenNebula::Client.new(secret, endpoint) end @@ -361,10 +386,22 @@ EOT if defined?(@@client) @@client else - self.get_client({}) + self.get_client end end + def self.set_user(user) + @@user=user + end + + def self.set_password(password) + @@password=password + end + + def self.set_endpoint(endpoint) + @@endpoint=endpoint + end + if RUBY_VERSION>="1.9.3" require 'io/console' def self.get_password @@ -374,6 +411,7 @@ EOT puts pass.chop! 
if pass + @@password=pass pass end else @@ -381,8 +419,9 @@ EOT def self.get_password print "Password: " system("stty", "-echo") + @@password=gets.chop begin - return gets.chop + return @@password ensure system("stty", "echo") print "\n" @@ -397,7 +436,7 @@ EOT end def set_client(options) - @client=OpenNebulaHelper::OneHelper.get_client(options) + @client=OpenNebulaHelper::OneHelper.get_client(options, true) end def create_resource(options, &block) @@ -565,7 +604,7 @@ EOT end def self.name_to_id(name, pool, ename) - if ename=="CLUSTER" and name=="ALL" + if ename=="CLUSTER" and name.upcase=="ALL" return 0, "ALL" end diff --git a/src/cli/one_helper/oneacct_helper.rb b/src/cli/one_helper/oneacct_helper.rb index f3048e3aa5..55187c696d 100644 --- a/src/cli/one_helper/oneacct_helper.rb +++ b/src/cli/one_helper/oneacct_helper.rb @@ -87,7 +87,7 @@ class AcctHelper < OpenNebulaHelper::OneHelper :name => "json", :short => "-j", :large => "--json", - :description => "Show the resource in xml format" + :description => "Show the resource in json format" } SPLIT={ @@ -100,6 +100,10 @@ class AcctHelper < OpenNebulaHelper::OneHelper ACCT_TABLE = CLIHelper::ShowTable.new("oneacct.yaml", nil) do + column :UID, "User ID", :size=>4 do |d| + d["UID"] + end + column :VID, "Virtual Machine ID", :size=>4 do |d| d["OID"] end @@ -181,4 +185,4 @@ class AcctHelper < OpenNebulaHelper::OneHelper CLIHelper.scr_restore puts end -end \ No newline at end of file +end diff --git a/src/cli/one_helper/onegroup_helper.rb b/src/cli/one_helper/onegroup_helper.rb index d60576f5d3..b011b9fd81 100644 --- a/src/cli/one_helper/onegroup_helper.rb +++ b/src/cli/one_helper/onegroup_helper.rb @@ -41,6 +41,10 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper exit_code , msg = group.create_default_acls + if OpenNebula.is_error?(exit_code) + return -1, exit_code.message + end + exit_code.to_i end @@ -62,21 +66,27 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper def format_pool(options) config_file = 
self.class.table_conf - prefix = '/GROUP_POOL/DEFAULT_GROUP_QUOTAS/' - group_pool = @group_pool - - quotas = group_pool.get_hash()['GROUP_POOL']['QUOTAS'] - quotas_hash = Hash.new - - if (!quotas.nil?) - quotas = [quotas].flatten - - quotas.each do |q| - quotas_hash[q['ID']] = q - end - end - table = CLIHelper::ShowTable.new(config_file, self) do + def pool_default_quotas(path) + @data.dsearch('/GROUP_POOL/DEFAULT_GROUP_QUOTAS/'+path) + end + + def quotas + if !defined?(@quotas) + quotas = @data.dsearch('GROUP_POOL/QUOTAS') + @quotas = Hash.new + + if (!quotas.nil?) + quotas = [quotas].flatten + + quotas.each do |q| + @quotas[q['ID']] = q + end + end + end + @quotas + end + column :ID, "ONE identifier for the Group", :size=>4 do |d| d["ID"] end @@ -100,11 +110,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper column :VMS , "Number of VMS", :size=>9 do |d| begin - q = quotas_hash[d['ID']] + q = quotas[d['ID']] limit = q['VM_QUOTA']['VM']["VMS"] if limit == "-1" - limit = group_pool["#{prefix}VM_QUOTA/VM/VMS"] + limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/VMS"] limit = "0" if limit.nil? || limit == "" end @@ -117,11 +127,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper column :MEMORY, "Total memory allocated to user VMs", :size=>17 do |d| begin - q = quotas_hash[d['ID']] + q = quotas[d['ID']] limit = q['VM_QUOTA']['VM']["MEMORY"] if limit == "-1" - limit = group_pool["#{prefix}VM_QUOTA/VM/MEMORY"] + limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/MEMORY"] limit = "0" if limit.nil? || limit == "" end @@ -135,11 +145,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper column :CPU, "Total CPU allocated to user VMs", :size=>11 do |d| begin - q = quotas_hash[d['ID']] + q = quotas[d['ID']] limit = q['VM_QUOTA']['VM']["CPU"] if limit == "-1" - limit = group_pool["#{prefix}VM_QUOTA/VM/CPU"] + limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/CPU"] limit = "0" if limit.nil? 
|| limit == "" end @@ -213,6 +223,10 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper puts str % ["NAME", group.name] puts + CLIHelper.print_header(str_h1 % "GROUP TEMPLATE",false) + puts group.template_str + puts + CLIHelper.print_header(str_h1 % "USERS", false) CLIHelper.print_header("%-15s" % ["ID"]) group.user_ids.each do |uid| diff --git a/src/cli/one_helper/onehost_helper.rb b/src/cli/one_helper/onehost_helper.rb index d80a3b3c4b..50f149dee5 100644 --- a/src/cli/one_helper/onehost_helper.rb +++ b/src/cli/one_helper/onehost_helper.rb @@ -307,7 +307,7 @@ class OneHostHelper < OpenNebulaHelper::OneHelper str="#{bar} #{info} " name=host[0..(79-str.length)] str=str+name - str=str+" "*(79-str.length) + str=str+" "*(80-str.length) print "#{str}\r" STDOUT.flush diff --git a/src/cli/one_helper/onezone_helper.rb b/src/cli/one_helper/onezone_helper.rb index f854201794..8a8fe39da2 100644 --- a/src/cli/one_helper/onezone_helper.rb +++ b/src/cli/one_helper/onezone_helper.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. 
You may obtain # @@ -43,11 +43,11 @@ class OneZoneHelper < OpenNebulaHelper::OneHelper d["NAME"] end - column :ENDPOINT, "Endpoint of the Zone", :left, :size=>50 do |d| + column :ENDPOINT, "Endpoint of the Zone", :left, :size=>45 do |d| d["TEMPLATE"]['ENDPOINT'] end - default :ID, :NAME, :ENDPOINT + default :CURRENT, :ID, :NAME, :ENDPOINT end table diff --git a/src/cli/oneacct b/src/cli/oneacct index 0aca436c03..9f27f732d8 100755 --- a/src/cli/oneacct +++ b/src/cli/oneacct @@ -44,9 +44,15 @@ cmd = CommandParser::CmdParser.new(ARGV) do end option AcctHelper::ACCT_OPTIONS + CommandParser::OPTIONS + + [OpenNebulaHelper::DESCRIBE, CLIHelper::LIST, CLIHelper::CSV] + OpenNebulaHelper::CLIENT_OPTIONS main do + if options[:describe] + AcctHelper::ACCT_TABLE.describe_columns + exit(0) + end + filter_flag = (options[:userfilter] || VirtualMachinePool::INFO_ALL) start_time = options[:start_time] ? options[:start_time].to_i : -1 end_time = options[:end_time] ? options[:end_time].to_i : -1 @@ -81,7 +87,10 @@ cmd = CommandParser::CmdParser.new(ARGV) do else order_by = Hash.new order_by[:order_by_1] = 'VM/UID' - order_by[:order_by_2] = 'VM/ID' if options[:split] + + if options[:split] && !options[:csv] + order_by[:order_by_2] = 'VM/ID' + end acct_hash = pool.accounting(filter_flag, common_opts.merge(order_by)) @@ -90,6 +99,21 @@ cmd = CommandParser::CmdParser.new(ARGV) do exit -1 end + if options[:csv] + a=Array.new + acct_hash.each do |user_id, value| + value['HISTORY_RECORDS']['HISTORY'].each do |l| + l['UID']=user_id + a << l + end + end + + cols=AcctHelper::ACCT_TABLE.default_columns + AcctHelper::ACCT_TABLE.default(:UID, *cols) + + AcctHelper::ACCT_TABLE.show(a, options) + exit(0) + end if ( start_time != -1 or end_time != -1 ) AcctHelper.print_start_end_time_header(start_time, end_time) @@ -102,13 +126,13 @@ cmd = CommandParser::CmdParser.new(ARGV) do # Use one table for each VM value.each { |vm_id, history_array| array = history_array['HISTORY_RECORDS']['HISTORY'] - 
AcctHelper::ACCT_TABLE.show(array) + AcctHelper::ACCT_TABLE.show(array, options) puts } else # Use the same table for all the VMs array = value['HISTORY_RECORDS']['HISTORY'] - AcctHelper::ACCT_TABLE.show(array) + AcctHelper::ACCT_TABLE.show(array, options) puts end } diff --git a/src/cli/onegroup b/src/cli/onegroup index 746e41fd47..8a3d0ca587 100755 --- a/src/cli/onegroup +++ b/src/cli/onegroup @@ -135,6 +135,24 @@ cmd=CommandParser::CmdParser.new(ARGV) do end end + update_desc = <<-EOT.unindent + Update the template contents. If a path is not provided the editor will + be launched to modify the current content. + EOT + + command :update, update_desc, :groupid, [:file, nil], + :options=>OpenNebulaHelper::APPEND do + helper.perform_action(args[0],options,"modified") do |obj| + if options[:append] + str = OpenNebulaHelper.append_template(args[0], obj, args[1]) + else + str = OpenNebulaHelper.update_template(args[0], obj, args[1]) + end + + obj.update(str, options[:append]) + end + end + delete_desc = <<-EOT.unindent Deletes the given Group EOT diff --git a/src/cli/onehost b/src/cli/onehost index 7813046590..8e4d5de3e8 100755 --- a/src/cli/onehost +++ b/src/cli/onehost @@ -20,7 +20,7 @@ ONE_LOCATION=ENV["ONE_LOCATION"] if !ONE_LOCATION RUBY_LIB_LOCATION="/usr/lib/one/ruby" - REMOTES_LOCATION="/var/lib/one/remotes" + REMOTES_LOCATION="/var/lib/one/remotes/" else RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" REMOTES_LOCATION=ONE_LOCATION+"/var/remotes/" diff --git a/src/cli/onezone b/src/cli/onezone index 2b196d0cb7..24b1ec5c46 100755 --- a/src/cli/onezone +++ b/src/cli/onezone @@ -1,7 +1,7 @@ #!/usr/bin/env ruby # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. 
You may obtain # diff --git a/src/cloud/common/CloudClient.rb b/src/cloud/common/CloudClient.rb index 555e6a308e..f915681438 100644 --- a/src/cloud/common/CloudClient.rb +++ b/src/cloud/common/CloudClient.rb @@ -50,7 +50,7 @@ end module CloudClient # OpenNebula version - VERSION = '4.5.0' + VERSION = '4.5.80' # ######################################################################### # Default location for the authentication file diff --git a/src/cloud/ec2/lib/keypair.rb b/src/cloud/ec2/lib/keypair.rb index 464b5d8904..47294e93b4 100644 --- a/src/cloud/ec2/lib/keypair.rb +++ b/src/cloud/ec2/lib/keypair.rb @@ -88,7 +88,7 @@ module Keypair erb_private_key = rsa_kp erb_public_key = rsa_kp.public_key - erb_key_fingerprint = Digest::MD5.hexdigest(rsa_kp.to_der) + erb_key_fingerprint = Digest::MD5.hexdigest(rsa_kp.to_blob) erb_key_fingerprint.gsub!(/(.{2})(?=.)/, '\1:\2') erb_ssh_public_key = erb_public_key.ssh_type << diff --git a/src/cluster/Cluster.cc b/src/cluster/Cluster.cc index fbdeee4311..451204766e 100644 --- a/src/cluster/Cluster.cc +++ b/src/cluster/Cluster.cc @@ -118,18 +118,7 @@ string& Cluster::get_ds_location(string &ds_location) int Cluster::add_datastore(int id, Datastore::DatastoreType ds_type, string& error_msg) { - if ( id == DatastorePool::SYSTEM_DS_ID ) - { - ostringstream oss; - oss << "Datastore "<< DatastorePool::SYSTEM_DS_ID - << " cannot be added to any cluster."; - - error_msg = oss.str(); - - return -1; - } - - int rc = datastores.add_collection_id(id); + int rc = datastores.add_collection_id(id); if ( rc < 0 ) { diff --git a/src/datastore/DatastorePool.cc b/src/datastore/DatastorePool.cc index 8cee553741..27d81c2485 100644 --- a/src/datastore/DatastorePool.cc +++ b/src/datastore/DatastorePool.cc @@ -103,7 +103,7 @@ DatastorePool::DatastorePool(SqlDB * db): GroupPool::ONEADMIN_ID, UserPool::oneadmin_name, GroupPool::ONEADMIN_NAME, - 0133, + 0137, ds_tmpl, &rc, ClusterPool::NONE_CLUSTER_ID, @@ -137,7 +137,7 @@ 
DatastorePool::DatastorePool(SqlDB * db): GroupPool::ONEADMIN_ID, UserPool::oneadmin_name, GroupPool::ONEADMIN_NAME, - 0133, + 0137, ds_tmpl, &rc, ClusterPool::NONE_CLUSTER_ID, @@ -241,14 +241,6 @@ int DatastorePool::drop(PoolObjectSQL * objsql, string& error_msg) int rc; - // Return error if the datastore is a default one. - if( datastore->get_oid() < 100 ) - { - error_msg = "System Datastores (ID < 100) cannot be deleted."; - NebulaLog::log("DATASTORE", Log::ERROR, error_msg); - return -2; - } - if( datastore->get_collection_size() > 0 ) { ostringstream oss; diff --git a/src/datastore_mad/remotes/ceph/ceph.conf b/src/datastore_mad/remotes/ceph/ceph.conf index ebd0123739..b3885c2bd5 100644 --- a/src/datastore_mad/remotes/ceph/ceph.conf +++ b/src/datastore_mad/remotes/ceph/ceph.conf @@ -22,3 +22,14 @@ POOL_NAME=one # temporarily during the create/mkfs processes. This directoy MUST exist, # have enough space and be writeable by 'oneadmin' STAGING_DIR=/var/tmp + +# Default RBD_FORMAT. By default RBD format 1 will be used. Uncomment the +# following options to enable support for RBD 2. This value affects all the ceph +# datastores, however it can be enabled per ceph datastore using the same +# option in the datastore template +# RBD_FORMAT=2 + +# Extra arguments send to "qemu-img convert". Depending on the qemu-img version +# it using "-O rbd" can be either recommended or may cause segfaults. 
Uncomment +# the following line to add "-O rbd" to the qemu-img convert command +# QEMU_IMG_CONVERT_ARGS="-O rbd" diff --git a/src/datastore_mad/remotes/ceph/cp b/src/datastore_mad/remotes/ceph/cp index 8a55e8f159..505ce2389d 100755 --- a/src/datastore_mad/remotes/ceph/cp +++ b/src/datastore_mad/remotes/ceph/cp @@ -54,6 +54,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/STAGING_DIR \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RBD_FORMAT \ /DS_DRIVER_ACTION_DATA/IMAGE/PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/SIZE \ /DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5 \ @@ -69,6 +70,7 @@ SAFE_DIRS="${XPATH_ELEMENTS[i++]}" BRIDGE_LIST="${XPATH_ELEMENTS[i++]}" POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" STAGING_DIR="${XPATH_ELEMENTS[i++]:-$STAGING_DIR}" +RBD_FORMAT="${XPATH_ELEMENTS[i++]:-$RBD_FORMAT}" SRC="${XPATH_ELEMENTS[i++]}" SIZE="${XPATH_ELEMENTS[i++]}" MD5="${XPATH_ELEMENTS[i++]}" @@ -121,8 +123,18 @@ exec_and_log "eval $DUMP | $SSH $DST_HOST $DD of=$TMP_DST bs=64k" \ REGISTER_CMD=$(cat <( decompress "$decompressor" "$TO" ) \ - >( hasher $HASH_TYPE ) >/dev/null + >( hasher $HASH_TYPE ) | cat >/dev/null if [ "$?" 
!= "0" ]; then echo "Error copying" >&2 diff --git a/src/group/Group.cc b/src/group/Group.cc index 3f0c31e1f3..363e75a4eb 100644 --- a/src/group/Group.cc +++ b/src/group/Group.cc @@ -214,6 +214,7 @@ string& Group::to_xml_extended(string& xml, bool extended) const { ostringstream oss; string collection_xml; + string template_xml; set >::const_iterator it; @@ -221,8 +222,9 @@ string& Group::to_xml_extended(string& xml, bool extended) const oss << "" << - "" << oid << "" << - "" << name << "" << + "" << oid << "" << + "" << name << "" << + obj_template->to_xml(template_xml) << collection_xml; for (it = providers.begin(); it != providers.end(); it++) @@ -285,6 +287,19 @@ int Group::from_xml(const string& xml) ObjectXML::free_nodes(content); content.clear(); + // Get associated metadata for the group + ObjectXML::get_nodes("/GROUP/TEMPLATE", content); + + if (content.empty()) + { + return -1; + } + + rc += obj_template->from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + content.clear(); + // Set of resource providers ObjectXML::get_nodes("/GROUP/RESOURCE_PROVIDER", content); @@ -317,6 +332,11 @@ int Group::from_xml(const string& xml) int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) { + AclManager* aclm = Nebula::instance().get_aclm(); + + int rc = 0; + long long mask_prefix; + pair >::iterator,bool> ret; ret = providers.insert(pair(zone_id, cluster_id)); @@ -327,6 +347,56 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) return -1; } + if (cluster_id == ClusterPool::ALL_RESOURCES) + { + mask_prefix = AclRule::ALL_ID; + } + else + { + mask_prefix = AclRule::CLUSTER_ID | cluster_id; + } + + // @ HOST/% MANAGE # + rc += aclm->add_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::HOST, + + AuthRequest::MANAGE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + if (rc < 0) + { + NebulaLog::log("GROUP",Log::ERROR,error_msg); + } + + // @ DATASTORE+NET/% USE # + rc += 
aclm->add_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::DATASTORE | + PoolObjectSQL::NET, + + AuthRequest::USE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + if (rc < 0) + { + NebulaLog::log("GROUP",Log::ERROR,error_msg); + } + return 0; } @@ -335,11 +405,68 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) int Group::del_resource_provider(int zone_id, int cluster_id, string& error_msg) { + AclManager* aclm = Nebula::instance().get_aclm(); + + int rc = 0; + + long long mask_prefix; + if( providers.erase(pair(zone_id, cluster_id)) != 1 ) { error_msg = "Resource provider is not assigned to this group"; return -1; } + if (cluster_id == ClusterPool::ALL_RESOURCES) + { + mask_prefix = AclRule::ALL_ID; + } + else + { + mask_prefix = AclRule::CLUSTER_ID | cluster_id; + } + + // @ HOST/% MANAGE # + rc += aclm->del_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::HOST, + + AuthRequest::MANAGE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + if (rc < 0) + { + NebulaLog::log("GROUP",Log::ERROR,error_msg); + } + + // @ DATASTORE+NET/% USE # + rc += aclm->del_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::DATASTORE | + PoolObjectSQL::NET, + + AuthRequest::USE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + if (rc < 0) + { + NebulaLog::log("GROUP",Log::ERROR,error_msg); + } + return 0; } + diff --git a/src/group/GroupPool.cc b/src/group/GroupPool.cc index 2340fe0433..829d7c8d60 100644 --- a/src/group/GroupPool.cc +++ b/src/group/GroupPool.cc @@ -77,6 +77,12 @@ GroupPool::GroupPool(SqlDB * db, goto error_groups; } + group = get(rc, true); + + group->add_resource_provider(Nebula::instance().get_zone_id(), ClusterPool::ALL_RESOURCES, error_str); + + group->unlock(); + set_update_lastOID(99); } diff --git a/src/host/Host.cc b/src/host/Host.cc index 42a47b97db..8676c57b3e 100644 --- a/src/host/Host.cc +++ b/src/host/Host.cc @@ -238,7 +238,9 @@ 
int Host::update_info(Template &tmpl, bool &with_vm_info, set &lost, map &found, - const set &non_shared_ds) + const set &non_shared_ds, + long long reserved_cpu, + long long reserved_mem) { VectorAttribute* vatt; vector::iterator it; @@ -289,10 +291,12 @@ int Host::update_info(Template &tmpl, if (isEnabled()) { + get_reserved_capacity(reserved_cpu, reserved_mem); + erase_template_attribute("TOTALCPU", val); - host_share.max_cpu = val; + host_share.max_cpu = val - reserved_cpu; erase_template_attribute("TOTALMEMORY", val); - host_share.max_mem = val; + host_share.max_mem = val - reserved_mem; erase_template_attribute("DS_LOCATION_TOTAL_MB", val); host_share.max_disk = val; diff --git a/src/im/MonitorThread.cc b/src/im/MonitorThread.cc index bfd4f561bb..921133fcbf 100644 --- a/src/im/MonitorThread.cc +++ b/src/im/MonitorThread.cc @@ -36,6 +36,8 @@ LifeCycleManager * MonitorThread::lcm; MonitorThreadPool * MonitorThread::mthpool; +ClusterPool * MonitorThread::cpool; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -94,7 +96,7 @@ void MonitorThread::do_message() } // ------------------------------------------------------------------------- - // Get DS Information from Moniroting Information + // Get DS Information from Moniroting Information & Reserved Capacity // ------------------------------------------------------------------------- map datastores; map::iterator itm; @@ -104,7 +106,13 @@ void MonitorThread::do_message() set non_shared_ds; - int rc = host->extract_ds_info(*hinfo, tmpl, datastores); + int rc = host->extract_ds_info(*hinfo, tmpl, datastores); + + int cid = host->get_cluster_id(); + + long long reserved_cpu = 0; + + long long reserved_mem = 0; delete hinfo; @@ -115,6 +123,18 @@ void MonitorThread::do_message() return; } + if (cid != -1) + { + Cluster *cluster = cpool->get(cid, true); + + if (cluster != 0) + { + 
cluster->get_reserved_capacity(reserved_cpu, reserved_mem); + + cluster->unlock(); + } + } + for (itm = datastores.begin(); itm != datastores.end(); itm++) { ds = dspool->get(itm->first, true); @@ -170,7 +190,8 @@ void MonitorThread::do_message() return; } - rc = host->update_info(tmpl, vm_poll, lost, found, non_shared_ds); + rc = host->update_info(tmpl, vm_poll, lost, found, non_shared_ds, + reserved_cpu, reserved_mem); hpool->update(host); @@ -220,6 +241,8 @@ MonitorThreadPool::MonitorThreadPool(int max_thr):concurrent_threads(max_thr), MonitorThread::lcm = Nebula::instance().get_lcm(); + MonitorThread::cpool = Nebula::instance().get_clpool(); + MonitorThread::mthpool= this; //Initialize concurrency variables diff --git a/src/im_mad/remotes/VERSION b/src/im_mad/remotes/VERSION index ae153944ee..a6517be9b3 100644 --- a/src/im_mad/remotes/VERSION +++ b/src/im_mad/remotes/VERSION @@ -1 +1 @@ -4.5.0 \ No newline at end of file +4.5.80 \ No newline at end of file diff --git a/src/im_mad/remotes/common.d/collectd-client-shepherd.sh b/src/im_mad/remotes/common.d/collectd-client-shepherd.sh new file mode 100755 index 0000000000..04058ca178 --- /dev/null +++ b/src/im_mad/remotes/common.d/collectd-client-shepherd.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +( + +running_pid=$(cat /tmp/one-collectd-client.pid) +pids=$(ps axuwww | grep /collectd-client.rb | grep -v grep | awk '{ print $2 }' | grep -v "^${running_pid}$") + +if [ -n "$pids" ]; then + kill -6 $pids +fi + +) > /dev/null + diff --git a/src/image/ImagePool.cc b/src/image/ImagePool.cc index 6974929157..4d326fafed 100644 --- a/src/image/ImagePool.cc +++ b/src/image/ImagePool.cc @@ -36,6 +36,7 @@ ImagePool::ImagePool( SqlDB * db, const string& __default_type, const string& __default_dev_prefix, + const string& __default_cdrom_dev_prefix, vector& restricted_attrs, vector hook_mads, const string& remotes_location, @@ -47,7 +48,7 @@ ImagePool::ImagePool( _default_type = __default_type; _default_dev_prefix = __default_dev_prefix; - _default_cdrom_dev_prefix = "hd"; + _default_cdrom_dev_prefix = __default_cdrom_dev_prefix; // Init inherit attributes vector::const_iterator it; diff --git a/src/lcm/LifeCycleManager.cc b/src/lcm/LifeCycleManager.cc index 1e4137ad4e..62f79d37b1 100644 --- a/src/lcm/LifeCycleManager.cc +++ b/src/lcm/LifeCycleManager.cc @@ -356,7 +356,7 @@ void LifeCycleManager::do_action(const string &action, void * arg) } else if (action == "ATTACH_FAILURE") { - attach_failure_action(vid); + attach_failure_action(vid, false); } else if (action == "DETACH_SUCCESS") { diff --git a/src/lcm/LifeCycleStates.cc b/src/lcm/LifeCycleStates.cc index a4a33fc995..49a296fad0 100644 --- a/src/lcm/LifeCycleStates.cc +++ b/src/lcm/LifeCycleStates.cc @@ -1340,7 +1340,7 @@ void LifeCycleManager::attach_success_action(int vid) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -void LifeCycleManager::attach_failure_action(int vid) +void LifeCycleManager::attach_failure_action(int vid, bool release_save_as) { VirtualMachine * vm; VectorAttribute * disk; @@ -1385,6 +1385,17 @@ void 
LifeCycleManager::attach_failure_action(int vid) Quotas::quota_del(Quotas::IMAGE, uid, gid, &tmpl); imagem->release_image(oid, image_id, false); + + // Release non-persistent images in the detach event + if (release_save_as) + { + int save_as_id; + + if ( disk->vector_value("SAVE_AS", save_as_id) == 0 ) + { + imagem->release_image(oid, save_as_id, false); + } + } } else // Volatile disk { @@ -1408,7 +1419,7 @@ void LifeCycleManager::attach_failure_action(int vid) void LifeCycleManager::detach_success_action(int vid) { - attach_failure_action(vid); + attach_failure_action(vid, true); } /* -------------------------------------------------------------------------- */ diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index 458f22151f..ae7095fcb3 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -300,66 +300,68 @@ void Nebula::start(bool bootstrap_only) // Prepare the SystemDB and check versions // --------------------------------------------------------------------- + bool local_bootstrap; + bool shared_bootstrap; + NebulaLog::log("ONE",Log::INFO,"Checking database version."); system_db = new SystemDB(db); - rc = system_db->check_db_version(is_federation_slave()); - + rc = system_db->check_db_version(is_federation_slave(), + local_bootstrap, + shared_bootstrap); if( rc == -1 ) { - throw runtime_error("Database version mismatch."); + throw runtime_error("Database version mismatch. 
Check oned.log."); } - if( is_federation_slave() && rc == -2 ) - { - throw runtime_error( - "Either the database was not bootstrapped by the " - "federation master, or the replication was " - "not configured."); - } + rc = 0; - if( rc == -2 || rc == -3 ) + if (local_bootstrap) { - rc = 0; - NebulaLog::log("ONE",Log::INFO, - "Bootstrapping OpenNebula database."); + "Bootstrapping OpenNebula database, stage 1."); rc += VirtualMachinePool::bootstrap(db); rc += HostPool::bootstrap(db); rc += VirtualNetworkPool::bootstrap(db); - rc += GroupPool::bootstrap(db); - rc += UserPool::bootstrap(db); rc += ImagePool::bootstrap(db); rc += VMTemplatePool::bootstrap(db); - rc += AclManager::bootstrap(db); rc += DatastorePool::bootstrap(db); rc += ClusterPool::bootstrap(db); rc += DocumentPool::bootstrap(db); + + // Create the system tables only if bootstrap went well + if (rc == 0) + { + rc += system_db->local_bootstrap(); + } + } + + if (shared_bootstrap) + { + NebulaLog::log("ONE",Log::INFO, + "Bootstrapping OpenNebula database, stage 2."); + + rc += GroupPool::bootstrap(db); + rc += UserPool::bootstrap(db); + rc += AclManager::bootstrap(db); rc += ZonePool::bootstrap(db); // Create the system tables only if bootstrap went well if ( rc == 0 ) { - if (is_federation_slave()) - { - rc += system_db->slave_bootstrap(); - } - else - { - rc += system_db->bootstrap(); - } + rc += system_db->shared_bootstrap(); } // Insert default system attributes rc += default_user_quota.insert(); rc += default_group_quota.insert(); + } - if ( rc != 0 ) - { - throw runtime_error("Error bootstrapping database."); - } + if ( rc != 0 ) + { + throw runtime_error("Error bootstrapping database."); } } catch (exception&) @@ -377,6 +379,59 @@ void Nebula::start(bool bootstrap_only) return; } + // ----------------------------------------------------------- + // Close stds, we no longer need them + // ----------------------------------------------------------- + + fd = open("/dev/null", O_RDWR); + + dup2(fd,0); 
+ dup2(fd,1); + dup2(fd,2); + + close(fd); + + fcntl(0,F_SETFD,0); // Keep them open across exec funcs + fcntl(1,F_SETFD,0); + fcntl(2,F_SETFD,0); + + // ----------------------------------------------------------- + // Block all signals before creating any Nebula thread + // ----------------------------------------------------------- + + sigfillset(&mask); + + pthread_sigmask(SIG_BLOCK, &mask, NULL); + + // ----------------------------------------------------------- + //Managers + // ----------------------------------------------------------- + + MadManager::mad_manager_system_init(); + + time_t timer_period; + time_t monitor_period; + + nebula_configuration->get("MANAGER_TIMER", timer_period); + nebula_configuration->get("MONITORING_INTERVAL", monitor_period); + + // ---- ACL Manager ---- + try + { + aclm = new AclManager(db, zone_id, is_federation_slave(), timer_period); + } + catch (bad_alloc&) + { + throw; + } + + rc = aclm->start(); + + if ( rc != 0 ) + { + throw runtime_error("Could not start the ACL Manager"); + } + // ----------------------------------------------------------- // Pools // ----------------------------------------------------------- @@ -387,6 +442,7 @@ void Nebula::start(bool bootstrap_only) string mac_prefix; string default_image_type; string default_device_prefix; + string default_cdrom_device_prefix; time_t expiration_time; time_t vm_expiration; @@ -465,10 +521,12 @@ void Nebula::start(bool bootstrap_only) nebula_configuration->get("DEFAULT_IMAGE_TYPE", default_image_type); nebula_configuration->get("DEFAULT_DEVICE_PREFIX", default_device_prefix); - + nebula_configuration->get("DEFAULT_CDROM_DEVICE_PREFIX", + default_cdrom_device_prefix); ipool = new ImagePool(db, default_image_type, default_device_prefix, + default_cdrom_device_prefix, img_restricted_attrs, image_hooks, remotes_location, @@ -487,41 +545,6 @@ void Nebula::start(bool bootstrap_only) throw; } - // ----------------------------------------------------------- - // Close stds, 
we no longer need them - // ----------------------------------------------------------- - - fd = open("/dev/null", O_RDWR); - - dup2(fd,0); - dup2(fd,1); - dup2(fd,2); - - close(fd); - - fcntl(0,F_SETFD,0); // Keep them open across exec funcs - fcntl(1,F_SETFD,0); - fcntl(2,F_SETFD,0); - - // ----------------------------------------------------------- - // Block all signals before creating any Nebula thread - // ----------------------------------------------------------- - - sigfillset(&mask); - - pthread_sigmask(SIG_BLOCK, &mask, NULL); - - // ----------------------------------------------------------- - //Managers - // ----------------------------------------------------------- - - MadManager::mad_manager_system_init(); - - time_t timer_period; - time_t monitor_period; - - nebula_configuration->get("MANAGER_TIMER", timer_period); - nebula_configuration->get("MONITORING_INTERVAL", monitor_period); // ---- Virtual Machine Manager ---- try @@ -698,24 +721,6 @@ void Nebula::start(bool bootstrap_only) } } - // ---- ACL Manager ---- - try - { - aclm = new AclManager(db, zone_id, is_federation_enabled(), - is_federation_slave(), timer_period); - } - catch (bad_alloc&) - { - throw; - } - - rc = aclm->start(); - - if ( rc != 0 ) - { - throw runtime_error("Could not start the ACL Manager"); - } - // ---- Image Manager ---- try { diff --git a/src/nebula/NebulaTemplate.cc b/src/nebula/NebulaTemplate.cc index 591eadd958..5f52db8fae 100644 --- a/src/nebula/NebulaTemplate.cc +++ b/src/nebula/NebulaTemplate.cc @@ -282,6 +282,7 @@ void OpenNebulaTemplate::set_conf_default() # DATASTORE_CAPACITY_CHECK # DEFAULT_IMAGE_TYPE # DEFAULT_DEVICE_PREFIX +# DEFAULT_CDROM_DEVICE_PREFIX #******************************************************************************* */ //DATASTORE_LOCATION @@ -311,6 +312,10 @@ void OpenNebulaTemplate::set_conf_default() attribute = new SingleAttribute("DEFAULT_DEVICE_PREFIX",value); conf_default.insert(make_pair(attribute->name(),attribute)); + + 
//DEFAULT_CDROM_DEVICE_PREFIX + attribute = new SingleAttribute("DEFAULT_CDROM_DEVICE_PREFIX",value); + conf_default.insert(make_pair(attribute->name(),attribute)); /* #******************************************************************************* # Auth Manager Configuration diff --git a/src/nebula/SystemDB.cc b/src/nebula/SystemDB.cc index 53902fb48e..7004f0e89a 100644 --- a/src/nebula/SystemDB.cc +++ b/src/nebula/SystemDB.cc @@ -30,21 +30,21 @@ const char * SystemDB::pc_bootstrap = "CREATE TABLE pool_control " "(tablename VARCHAR(32) PRIMARY KEY, last_oid BIGINT UNSIGNED)"; -// DB versioning table -const char * SystemDB::ver_table = "db_versioning"; -const char * SystemDB::ver_names = "oid, version, timestamp, comment"; +// DB versioning table, shared (federation) tables +const char * SystemDB::shared_ver_table = "db_versioning"; +const char * SystemDB::shared_ver_names = "oid, version, timestamp, comment"; -const char * SystemDB::ver_bootstrap = "CREATE TABLE db_versioning " +const char * SystemDB::shared_ver_bootstrap = "CREATE TABLE db_versioning " "(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, " "comment VARCHAR(256))"; -// DB slave versioning table -const char * SystemDB::slave_ver_table = "slave_db_versioning"; -const char * SystemDB::slave_ver_names = "oid, version, timestamp, comment"; +// DB versioning table, local tables +const char * SystemDB::local_ver_table = "local_db_versioning"; +const char * SystemDB::local_ver_names = "oid, version, timestamp, comment, is_slave"; -const char * SystemDB::slave_ver_bootstrap = "CREATE TABLE slave_db_versioning " +const char * SystemDB::local_ver_bootstrap = "CREATE TABLE local_db_versioning " "(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, " - "comment VARCHAR(256))"; + "comment VARCHAR(256), is_slave BOOLEAN)"; // System attributes table const char * SystemDB::sys_table = "system_attributes"; @@ -56,10 +56,34 @@ const char * SystemDB::sys_bootstrap = "CREATE TABLE IF NOT 
EXISTS" /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int SystemDB::bootstrap() +int SystemDB::shared_bootstrap() { - int rc; - ostringstream oss; + int rc; + ostringstream oss; + + // --------------------------------------------------------------------- + // db versioning, version of OpenNebula. + // --------------------------------------------------------------------- + oss.str(shared_ver_bootstrap); + rc = db->exec(oss); + + oss.str(""); + oss << "INSERT INTO " << shared_ver_table << " (" << shared_ver_names << ") " + << "VALUES (0, '" << Nebula::shared_db_version() << "', " << time(0) + << ", '" << Nebula::version() << " daemon bootstrap')"; + + rc += db->exec(oss); + + return rc; +}; + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int SystemDB::local_bootstrap() +{ + int rc; + ostringstream oss; // ------------------------------------------------------------------------ // pool control, tracks the last ID's assigned to objects @@ -68,15 +92,17 @@ int SystemDB::bootstrap() rc = db->exec(oss); // ------------------------------------------------------------------------ - // db versioning, version of OpenNebula. + // local db versioning, version of tables that are not replicated in a + // slave OpenNebula. 
// ------------------------------------------------------------------------ - oss.str(ver_bootstrap); + oss.str(local_ver_bootstrap); rc += db->exec(oss); oss.str(""); - oss << "INSERT INTO " << ver_table << " (" << ver_names << ") " - << "VALUES (0, '" << Nebula::db_version() << "', " << time(0) - << ", '" << Nebula::version() << " daemon bootstrap')"; + oss << "INSERT INTO " << local_ver_table << " (" << local_ver_names << ") " + << "VALUES (0, '" << Nebula::local_db_version() << "', " << time(0) + << ", '" << Nebula::version() << " daemon bootstrap', " + << Nebula::instance().is_federation_slave() << ")"; rc += db->exec(oss); @@ -87,43 +113,7 @@ int SystemDB::bootstrap() rc += db->exec(oss); return rc; -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -int SystemDB::slave_bootstrap() -{ - int rc; - ostringstream oss; - - // ------------------------------------------------------------------------ - // pool control, tracks the last ID's assigned to objects - // ------------------------------------------------------------------------ - oss.str(pc_bootstrap); - rc = db->exec(oss); - - // ------------------------------------------------------------------------ - // db versioning, version of OpenNebula. 
- // ------------------------------------------------------------------------ - oss.str(slave_ver_bootstrap); - rc += db->exec(oss); - - oss.str(""); - oss << "INSERT INTO " << slave_ver_table << " (" << slave_ver_names << ") " - << "VALUES (0, '" << Nebula::db_version() << "', " << time(0) - << ", '" << Nebula::version() << " daemon bootstrap')"; - - rc += db->exec(oss); - - // ------------------------------------------------------------------------ - // system - // ------------------------------------------------------------------------ - oss.str(sys_bootstrap); - rc += db->exec(oss); - - return rc; -} +}; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -146,98 +136,128 @@ int SystemDB::select_cb(void *_loaded_db_version, int num, char **values, /* -------------------------------------------------------------------------- */ -int SystemDB::check_db_version(bool is_federation_slave) +int SystemDB::check_db_version(const string& table, + const string& version, + string& error) { - int rc; - ostringstream oss; + ostringstream oss; + string db_version; - string loaded_db_version = ""; + error.clear(); - // Try to read latest version - set_callback( static_cast(&SystemDB::select_cb), - static_cast(&loaded_db_version) ); + set_callback(static_cast(&SystemDB::select_cb), + static_cast(&db_version)); - oss << "SELECT version FROM " << ver_table - << " WHERE oid=(SELECT MAX(oid) FROM " << ver_table << ")"; + oss << "SELECT version FROM " << table <<" WHERE oid=(SELECT MAX(oid) FROM " + << table << ")"; - db->exec(oss, this, true); + int rc = db->exec(oss, this, true); - oss.str(""); unset_callback(); - if( loaded_db_version == "" ) + if( rc != 0 || db_version.empty() )//DB needs bootstrap or replica config. { - // Table user_pool is present for all OpenNebula versions, and it - // always contains at least the oneadmin user. 
- oss << "SELECT MAX(oid) FROM user_pool"; - rc = db->exec(oss, 0, true); - - oss.str(""); - - if( rc != 0 ) // Database needs bootstrap - { - return -2; - } + return -2; } - if( Nebula::db_version() != loaded_db_version ) - { - if (!is_federation_slave) - { - oss << "Database version mismatch. " - << "Installed " << Nebula::version() << " uses DB version '" - << Nebula::db_version() << "', and existing DB version is '" - << loaded_db_version << "'."; - } - else - { - oss << "Database version mismatch. " - << "Installed slave " << Nebula::version() << " uses DB version '" - << Nebula::db_version() << "', and existing master DB version is '" - << loaded_db_version << "'."; - } + oss.str(""); + + if(version != db_version)//DB needs upgrade + { + oss << "Database version mismatch ( " << table << "). " + << "Installed " << Nebula::version() << " needs DB version '" + << version << "', and existing DB version is '"<< db_version << "'."; + + error = oss.str(); - NebulaLog::log("ONE",Log::ERROR,oss); return -1; } - if (is_federation_slave) + oss << "oned is using version " << version << " for " << table; + + NebulaLog::log("ONE", Log::INFO, oss); + + return 0; +}; + +/* -------------------------------------------------------------------------- */ + +int SystemDB::check_db_version(bool is_slave, bool &local_bs, bool &shared_bs) +{ + int rc; + string error; + + local_bs = false; + shared_bs = false; + + /* ---------------------------------------------------------------------- */ + /* Check DB version for local tables */ + /* ---------------------------------------------------------------------- */ + + rc = check_db_version(local_ver_table, Nebula::local_db_version(), error); + + switch(rc) { - string loaded_db_version = ""; + case 0:// All ok continue + break; - // Try to read latest version from the slave db version table - set_callback( static_cast(&SystemDB::select_cb), - static_cast(&loaded_db_version) ); - - oss << "SELECT version FROM " << slave_ver_table - << " WHERE 
oid=(SELECT MAX(oid) FROM " << slave_ver_table << ")"; - - db->exec(oss, this, true); - - oss.str(""); - unset_callback(); - - if( loaded_db_version == "" ) - { - return -3; - } - - if( Nebula::db_version() != loaded_db_version ) - { - oss << "Database version mismatch. " - << "Installed slave " << Nebula::version() << " uses DB version '" - << Nebula::db_version() << "', and existing slave DB version is '" - << loaded_db_version << "'."; - - NebulaLog::log("ONE",Log::ERROR,oss); + case -1:// Version missmatch (same for master/slave/standalone) + NebulaLog::log("ONE", Log::ERROR, error); + NebulaLog::log("ONE", Log::ERROR, "Use onedb to upgrade DB."); return -1; - } - return 0; + case -2: //Cannot access DB table or empty, bootstrap + local_bs = true; + break; + + default: + break; + } + + /* ---------------------------------------------------------------------- */ + /* Check DB version for shared (federation) tables */ + /* ---------------------------------------------------------------------- */ + + rc = check_db_version(shared_ver_table, Nebula::shared_db_version(), error); + + switch(rc) + { + case 0:// All ok continue + break; + + case -1:// Version missmatch + NebulaLog::log("ONE", Log::ERROR, error); + + if (is_slave) + { + NebulaLog::log("ONE", Log::ERROR, + "Cannot join federation, oned master needs upgrade."); + } + else + { + NebulaLog::log("ONE", Log::ERROR, "Use onedb to upgrade DB."); + } + + return -1; + + case -2: //Cannot access DB table or empty, bootstrap (only master/standalone) + if (is_slave) + { + NebulaLog::log("ONE", Log::ERROR, "Cannot access shared DB" + " tables. 
Check DB replica configuration."); + + return -1; + } + + shared_bs = true; + break; + + default: + break; } return 0; -} +}; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/src/oca/java/src/org/opennebula/client/OneSystem.java b/src/oca/java/src/org/opennebula/client/OneSystem.java index a6413fa309..173903d34a 100644 --- a/src/oca/java/src/org/opennebula/client/OneSystem.java +++ b/src/oca/java/src/org/opennebula/client/OneSystem.java @@ -32,7 +32,7 @@ public class OneSystem private static final String GROUP_QUOTA_INFO = "groupquota.info"; private static final String GROUP_QUOTA_UPDATE = "groupquota.update"; - public static final String VERSION = "4.5.0"; + public static final String VERSION = "4.5.80"; public OneSystem(Client client) { diff --git a/src/oca/ruby/opennebula.rb b/src/oca/ruby/opennebula.rb index bb23d3163a..0639799180 100644 --- a/src/oca/ruby/opennebula.rb +++ b/src/oca/ruby/opennebula.rb @@ -56,5 +56,5 @@ require 'opennebula/system' module OpenNebula # OpenNebula version - VERSION = '4.5.0' + VERSION = '4.5.80' end diff --git a/src/oca/ruby/opennebula/client.rb b/src/oca/ruby/opennebula/client.rb index 72e19c0274..d8e39f90f9 100644 --- a/src/oca/ruby/opennebula/client.rb +++ b/src/oca/ruby/opennebula/client.rb @@ -101,6 +101,8 @@ module OpenNebula # @param [Hash] options # @option params [Integer] :timeout connection timeout in seconds, # defaults to 30 + # @option params [String] :http_proxy HTTP proxy string used for + # connecting to the endpoint; defaults to no proxy # # @return [OpenNebula::Client] def initialize(secret=nil, endpoint=nil, options={}) @@ -130,7 +132,10 @@ module OpenNebula timeout=nil timeout=options[:timeout] if options[:timeout] - @server = XMLRPC::Client.new2(@one_endpoint, nil, timeout) + http_proxy=nil + http_proxy=options[:http_proxy] if options[:http_proxy] + + @server = 
XMLRPC::Client.new2(@one_endpoint, http_proxy, timeout) if defined?(OxStreamParser) @server.set_parser(OxStreamParser.new) diff --git a/src/oca/ruby/opennebula/group.rb b/src/oca/ruby/opennebula/group.rb index cebbaa9980..770a13c608 100644 --- a/src/oca/ruby/opennebula/group.rb +++ b/src/oca/ruby/opennebula/group.rb @@ -26,6 +26,7 @@ module OpenNebula GROUP_METHODS = { :info => "group.info", :allocate => "group.allocate", + :update => "group.update", :delete => "group.delete", :quota => "group.quota", :add_provider => "group.addprovider", @@ -36,7 +37,7 @@ module OpenNebula SELF = -1 # Default resource ACL's for group users (create) - GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE" + GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE+DOCUMENT" ALL_CLUSTERS_IN_ZONE = 10 # Creates a Group description with just its identifier @@ -222,6 +223,18 @@ module OpenNebula super(GROUP_METHODS[:allocate], groupname) end + # Replaces the template contents + # + # @param new_template [String] New template contents + # @param append [true, false] True to append new attributes instead of + # replace the whole template + # + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def update(new_template=nil, append=false) + super(GROUP_METHODS[:update], new_template, append ? 
1 : 0) + end + # Deletes the Group def delete() super(GROUP_METHODS[:delete]) diff --git a/src/oca/ruby/opennebula/xml_utils.rb b/src/oca/ruby/opennebula/xml_utils.rb index 70d488bf72..d4a83bdde3 100644 --- a/src/oca/ruby/opennebula/xml_utils.rb +++ b/src/oca/ruby/opennebula/xml_utils.rb @@ -110,6 +110,7 @@ module OpenNebula include ParsePoolBase alias :text :characters + alias :cdata :characters end end elsif NOKOGIRI diff --git a/src/oca/ruby/opennebula/zone.rb b/src/oca/ruby/opennebula/zone.rb index 2e371aedd0..483265304d 100644 --- a/src/oca/ruby/opennebula/zone.rb +++ b/src/oca/ruby/opennebula/zone.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # diff --git a/src/oca/ruby/opennebula/zone_pool.rb b/src/oca/ruby/opennebula/zone_pool.rb index dc929ce04c..d29174a35d 100644 --- a/src/oca/ruby/opennebula/zone_pool.rb +++ b/src/oca/ruby/opennebula/zone_pool.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. 
You may obtain # diff --git a/src/onedb/3.8.5_to_3.9.80.rb b/src/onedb/3.8.5_to_3.9.80.rb deleted file mode 100644 index aa61860e88..0000000000 --- a/src/onedb/3.8.5_to_3.9.80.rb +++ /dev/null @@ -1,600 +0,0 @@ -# -------------------------------------------------------------------------- # -# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -#--------------------------------------------------------------------------- # - -require 'set' -require "rexml/document" -include REXML - -class String - def red - colorize(31) - end - -private - - def colorize(color_code) - "\e[#{color_code}m#{self}\e[0m" - end -end - -module Migrator - def db_version - "3.9.80" - end - - def one_version - "OpenNebula 3.9.80" - end - - def up - - ######################################################################## - # Add Cloning Image ID collection to Images - ######################################################################## - - counters = {} - counters[:image] = {} - - # Init image counters - @db.fetch("SELECT oid,body FROM image_pool") do |row| - if counters[:image][row[:oid]].nil? - counters[:image][row[:oid]] = { - :clones => Set.new - } - end - - doc = Document.new(row[:body]) - - doc.root.each_element("CLONING_ID") do |e| - img_id = e.text.to_i - - if counters[:image][img_id].nil? 
- counters[:image][img_id] = { - :clones => Set.new - } - end - - counters[:image][img_id][:clones].add(row[:oid]) - end - end - - ######################################################################## - # Image - # - # IMAGE/CLONING_OPS - # IMAGE/CLONES/ID - ######################################################################## - - @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" - - @db[:image_pool].each do |row| - doc = Document.new(row[:body]) - - oid = row[:oid] - - n_cloning_ops = counters[:image][oid][:clones].size - - # Rewrite number of clones - doc.root.each_element("CLONING_OPS") { |e| - if e.text != n_cloning_ops.to_s - warn("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") - e.text = n_cloning_ops - end - } - - # re-do list of Images cloning this one - clones_new_elem = doc.root.add_element("CLONES") - - counters[:image][oid][:clones].each do |id| - clones_new_elem.add_element("ID").text = id.to_s - end - - row[:body] = doc.to_s - - # commit - @db[:image_pool_new].insert(row) - end - - # Rename table - @db.run("DROP TABLE image_pool") - @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") - - ######################################################################## - # Feature #1565: New cid column in host, ds and vnet tables - ######################################################################## - - @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" - @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - - @db.fetch("SELECT * FROM old_host_pool") do |row| - doc = Document.new(row[:body]) - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s - - @db[:host_pool].insert( - :oid => row[:oid], - 
:name => row[:name], - :body => row[:body], - :state => row[:state], - :last_mon_time => row[:last_mon_time], - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => cluster_id) - end - - @db.run "DROP TABLE old_host_pool;" - - ######################################################################## - # Feature #1565: New cid column - # Feature #471: IPv6 addresses - ######################################################################## - - @db.run "ALTER TABLE network_pool RENAME TO old_network_pool;" - @db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));" - - @db.fetch("SELECT * FROM old_network_pool") do |row| - doc = Document.new(row[:body]) - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s - - doc.root.add_element("GLOBAL_PREFIX") - doc.root.add_element("SITE_PREFIX") - - @db[:network_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => cluster_id) - end - - @db.run "DROP TABLE old_network_pool;" - - ######################################################################## - # Feature #1617 - # New datastore, 2 "files" - # DATASTORE/SYSTEM is now DATASTORE/TYPE - # - # Feature #1565: New cid column in host, ds and vnet tables - ######################################################################## - - @db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;" - @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - - @db.fetch("SELECT * FROM old_datastore_pool") do |row| - doc = 
Document.new(row[:body]) - - type = "0" # IMAGE_DS - - system_elem = doc.root.delete_element("SYSTEM") - - if ( !system_elem.nil? && system_elem.text == "1" ) - type = "1" # SYSTEM_DS - end - - doc.root.add_element("TYPE").text = type - - doc.root.each_element("TEMPLATE") do |e| - e.delete_element("SYSTEM") - e.add_element("TYPE").text = type == "0" ? "IMAGE_DS" : "SYSTEM_DS" - end - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s - - @db[:datastore_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => cluster_id) - end - - @db.run "DROP TABLE old_datastore_pool;" - - - user_0_name = "oneadmin" - - @db.fetch("SELECT name FROM user_pool WHERE oid=0") do |row| - user_0_name = row[:name] - end - - group_0_name = "oneadmin" - - @db.fetch("SELECT name FROM group_pool WHERE oid=0") do |row| - group_0_name = row[:name] - end - - base_path = "/var/lib/one/datastores/2" - - @db.fetch("SELECT body FROM datastore_pool WHERE oid=0") do |row| - doc = Document.new(row[:body]) - - doc.root.each_element("BASE_PATH") do |e| - base_path = e.text - base_path[-1] = "2" - end - end - - @db.run "INSERT INTO datastore_pool VALUES(2,'files','200#{user_0_name}#{group_0_name}files110100100fsssh#{base_path}20-1',0,0,1,1,1,-1);" - - - ######################################################################## - # Feature #1611: Default quotas - ######################################################################## - - @db.run("CREATE TABLE IF NOT EXISTS system_attributes (name VARCHAR(128) PRIMARY KEY, body MEDIUMTEXT)") - @db.run("INSERT INTO system_attributes VALUES('DEFAULT_GROUP_QUOTAS','');") - @db.run("INSERT INTO system_attributes VALUES('DEFAULT_USER_QUOTAS','');") - - - @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" - @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body 
MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - - # oneadmin does not have quotas - @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) - - set_default_quotas(doc) - - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_user_pool;" - - - @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" - @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - - - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) - - set_default_quotas(doc) - - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_group_pool;" - - ######################################################################## - # Bug #1694: SYSTEM_DS is now set with the method adddatastore - 
######################################################################## - - @db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;" - @db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - - @db.fetch("SELECT * FROM old_cluster_pool") do |row| - doc = Document.new(row[:body]) - - system_ds = 0 - - doc.root.each_element("TEMPLATE") do |e| - elem = e.delete_element("SYSTEM_DS") - - if !elem.nil? - system_ds = elem.text.to_i - end - end - - if system_ds != 0 - updated_body = nil - - @db.fetch("SELECT body FROM datastore_pool WHERE oid=#{system_ds}") do |ds_row| - ds_doc = Document.new(ds_row[:body]) - - type = "0" # IMAGE_DS - - ds_doc.root.each_element("TYPE") do |e| - type = e.text - end - - if type != "1" - puts " > Cluster #{row[:oid]} has the "<< - "System Datastore set to Datastore #{system_ds}, "<< - "but its type is not SYSTEM_DS. The System Datastore "<< - "for this Cluster will be set to 0" - - system_ds = 0 - else - cluster_id = "-1" - - ds_doc.root.each_element("CLUSTER_ID") do |e| - cluster_id = e.text - end - - if row[:oid] != cluster_id.to_i - puts " > Cluster #{row[:oid]} has the "<< - "System Datastore set to Datastore #{system_ds}, "<< - "but it is not part of the Cluster. It will be added now." - - ds_doc.root.each_element("CLUSTER_ID") do |e| - e.text = row[:oid] - end - - ds_doc.root.each_element("CLUSTER") do |e| - e.text = row[:name] - end - - updated_body = ds_doc.root.to_s - end - end - end - - if !updated_body.nil? 
- @db[:datastore_pool].where(:oid => system_ds).update( - :body => updated_body) - end - end - - doc.root.add_element("SYSTEM_DS").text = system_ds.to_s - - @db[:cluster_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_cluster_pool;" - - - ######################################################################## - # Feature #1556: New elem USER_TEMPLATE - # - # Feature #1483: Move scheduling attributes - # /VM/TEMPLATE/REQUIREMENTS -> USER_TEMPLATE/SCHED_REQUIREMENTS - # /VM/TEMPLATE/RANK -> USER_TEMPLATE/SCHED_RANK - ######################################################################## - - @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" - @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - - @db.fetch("SELECT * FROM old_vm_pool") do |row| - - doc = Document.new(row[:body]) - user_template = doc.root.add_element("USER_TEMPLATE") - - doc.root.each_element("TEMPLATE") do |e| - elem = e.delete_element("REQUIREMENTS") - - if !elem.nil? - user_template.add_element("SCHED_REQUIREMENTS").text = elem.text - end - - elem = e.delete_element("RANK") - - if !elem.nil? 
- user_template.add_element("SCHED_RANK").text = elem.text - end - end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_vm_pool;" - - - ######################################################################## - # Feature #1483: Move scheduling attributes - # /VMTEMPLATE/TEMPLATE/REQUIREMENTS -> /VMTEMPLATE/TEMPLATE/SCHED_REQUIREMENTS - # /VMTEMPLATE/TEMPLATE/RANK -> /VMTEMPLATE/TEMPLATE/SCHED_RANK - ######################################################################## - - @db.run "ALTER TABLE template_pool RENAME TO old_template_pool;" - @db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - - @db.fetch("SELECT * FROM old_template_pool") do |row| - - doc = Document.new(row[:body]) - - template = nil - - doc.root.each_element("TEMPLATE") do |e| - template = e - end - - doc.root.each_element("TEMPLATE") do |e| - elem = e.delete_element("REQUIREMENTS") - - if !elem.nil? - template.add_element("SCHED_REQUIREMENTS").text = elem.text - end - - elem = e.delete_element("RANK") - - if !elem.nil? 
- template.add_element("SCHED_RANK").text = elem.text - end - end - - @db[:template_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_template_pool;" - - ######################################################################## - # Feature #1691 Add new attribute NIC/NIC_ID - ######################################################################## - - @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" - @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - - @db.fetch("SELECT * FROM old_vm_pool") do |row| - if ( row[:state] != 6 ) # DONE - doc = Document.new(row[:body]) - - nic_id = 0 - - doc.root.each_element("TEMPLATE/NIC") { |e| - e.delete_element("NIC_ID") - e.add_element("NIC_ID").text = (nic_id).to_s - - nic_id += 1 - } - - row[:body] = doc.root.to_s - end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_vm_pool;" - - ######################################################################## - # - # Banner for the new /var/lib/one/vms directory - # - ######################################################################## - - puts - puts "ATTENTION: manual intervention required".red - puts <<-END.gsub(/^ {8}/, '') - Virtual Machine deployment files have been moved from /var/lib/one to - /var/lib/one/vms. 
You need to move these files manually: - - $ mv /var/lib/one/[0-9]* /var/lib/one/vms - - END - - return true - end - - - def set_default_quotas(doc) - - # VM quotas - - doc.root.each_element("VM_QUOTA/VM/CPU") do |e| - e.text = "-1" if e.text.to_f == 0 - end - - doc.root.each_element("VM_QUOTA/VM/MEMORY") do |e| - e.text = "-1" if e.text.to_i == 0 - end - - doc.root.each_element("VM_QUOTA/VM/VMS") do |e| - e.text = "-1" if e.text.to_i == 0 - end - - # VNet quotas - - doc.root.each_element("NETWORK_QUOTA/NETWORK/LEASES") do |e| - e.text = "-1" if e.text.to_i == 0 - end - - # Image quotas - - doc.root.each_element("IMAGE_QUOTA/IMAGE/RVMS") do |e| - e.text = "-1" if e.text.to_i == 0 - end - - # Datastore quotas - - doc.root.each_element("DATASTORE_QUOTA/DATASTORE/IMAGES") do |e| - e.text = "-1" if e.text.to_i == 0 - end - - doc.root.each_element("DATASTORE_QUOTA/DATASTORE/SIZE") do |e| - e.text = "-1" if e.text.to_i == 0 - end - end -end diff --git a/src/onedb/4.2.0_to_4.3.80.rb b/src/onedb/4.2.0_to_4.3.80.rb deleted file mode 100644 index 96ffc65e27..0000000000 --- a/src/onedb/4.2.0_to_4.3.80.rb +++ /dev/null @@ -1,401 +0,0 @@ -# -------------------------------------------------------------------------- # -# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. 
# -#--------------------------------------------------------------------------- # - -require 'rexml/document' - -TM_MAD_CONF = { - "dummy" => { - :ln_target => "NONE", - :clone_target => "SYSTEM" - }, - "lvm" => { - :ln_target => "NONE", - :clone_target => "SELF" - }, - "shared" => { - :ln_target => "NONE", - :clone_target => "SYSTEM" - }, - "shared_lvm" => { - :ln_target => "SYSTEM", - :clone_target => "SYSTEM" - }, - "qcow2" => { - :ln_target => "NONE", - :clone_target => "SYSTEM" - }, - "ssh" => { - :ln_target => "SYSTEM", - :clone_target => "SYSTEM" - }, - "vmfs" => { - :ln_target => "NONE", - :clone_target => "SYSTEM" - }, - "iscsi" => { - :ln_target => "NONE", - :clone_target => "SELF" - }, - "ceph" => { - :ln_target => "NONE", - :clone_target => "SELF" - } -} - -class String - def red - colorize(31) - end - -private - - def colorize(color_code) - "\e[#{color_code}m#{self}\e[0m" - end -end - -module Migrator - def db_version - "4.3.80" - end - - def one_version - "OpenNebula 4.3.80" - end - - def up - - ######################################################################## - # Feature #1742 & #1612 - ######################################################################## - - @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" - @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - - @db.fetch("SELECT * FROM old_user_pool") do |row| - doc = REXML::Document.new(row[:body]) - - doc.root.add_element("GROUPS").add_element("ID").text = row[:gid].to_s - - # oneadmin does not have quotas - if row[:oid] != 0 - redo_vm_quotas(doc, "uid=#{row[:oid]}") - end - - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_user_pool;" - - 
######################################################################## - # Feature #1612 - ######################################################################## - - @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" - @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = REXML::Document.new(row[:body]) - - redo_vm_quotas(doc, "gid=#{row[:oid]}") - - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_group_pool;" - - ######################################################################## - # Bug #2330 & Feature #1678 - ######################################################################## - - @db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;" - @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - - #tm_mads = {} - - @db.fetch("SELECT * FROM old_datastore_pool") do |row| - doc = REXML::Document.new(row[:body]) - - doc.root.each_element("TEMPLATE/HOST") do |e| - e.name = "BRIDGE_LIST" - end - - tm_mad = "" - doc.root.each_element("TM_MAD"){ |e| tm_mad = e.text } - - type = 0 - doc.root.each_element("TYPE"){ |e| type = e.text.to_i } - - if (type == 
1) # System DS - doc.root.each_element("TEMPLATE") do |e| - e.add_element("SHARED").text = - (tm_mad == "ssh" ? "NO" : "YES") - end - else - #tm_mads[row[:oid].to_i] = tm_mad - - conf = TM_MAD_CONF[tm_mad] - - if conf.nil? - puts - puts "ATTENTION: manual intervention required".red - puts <<-END -The Datastore ##{row[:oid]} #{row[:name]} is using the -custom TM MAD '#{tm_mad}'. You will need to define new -configuration parameters in oned.conf for this driver, see -http://opennebula.org/documentation:rel4.4:upgrade - END - else - doc.root.each_element("TEMPLATE") do |e| - e.add_element("LN_TARGET").text = conf[:ln_target] - e.add_element("CLONE_TARGET").text = conf[:clone_target] - end - end - end - - @db[:datastore_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => row[:cid]) - end - - @db.run "DROP TABLE old_datastore_pool;" - - ######################################################################## - # Feature #2392 - ######################################################################## - - @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" - @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - - @db.fetch("SELECT * FROM old_vm_pool") do |row| - doc = REXML::Document.new(row[:body]) - - doc.root.each_element("HISTORY_RECORDS/HISTORY") do |e| - update_history(e) - end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE 
old_vm_pool;" - - @db.run "ALTER TABLE history RENAME TO old_history;" - @db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));" - - @db.fetch("SELECT * FROM old_history") do |row| - doc = REXML::Document.new(row[:body]) - - doc.root.each_element("/HISTORY") do |e| - update_history(e) - end - - @db[:history].insert( - :vid => row[:vid], - :seq => row[:seq], - :body => doc.root.to_s, - :stime => row[:stime], - :etime => row[:etime]) - end - - @db.run "DROP TABLE old_history;" - - ######################################################################## - # Feature #1678 - ######################################################################## - - @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" - @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - - @db.fetch("SELECT * FROM old_host_pool") do |row| - doc = REXML::Document.new(row[:body]) - - doc.root.each_element("HOST_SHARE") do |e| - e.add_element("DATASTORES") - end - - @db[:host_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :state => row[:state], - :last_mon_time => row[:last_mon_time], - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => row[:cid]) - end - - @db.run "DROP TABLE old_host_pool;" - - # TODO: - # For Feature #1678, VMs have new disk elements: - # VM/DISK/CLONE_TARGET - # VM/DISK/LN_TARGET - # VM/DISK/SIZE - # - # These elements are only used to schedule new deployments, so if we - # don't add them it will only affect automatic deployment of VMs - # recreated (onevm delete --recreate). Manual deployments will still - # work without problems. 
- - return true - end - - def redo_vm_quotas(doc, where_filter) - cpu_limit = "-1" - mem_limit = "-1" - vms_limit = "-1" - vol_limit = "-1" - - doc.root.each_element("VM_QUOTA/VM/CPU") { |e| - cpu_limit = e.text - } - - doc.root.each_element("VM_QUOTA/VM/MEMORY") { |e| - mem_limit = e.text - } - - doc.root.each_element("VM_QUOTA/VM/VMS") { |e| - vms_limit = e.text - } - - doc.root.delete_element("VM_QUOTA") - vm_quota = doc.root.add_element("VM_QUOTA") - - # VM quotas - cpu_used = 0 - mem_used = 0 - vms_used = 0 - vol_used = 0 - - @db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row| - vmdoc = REXML::Document.new(vm_row[:body]) - - # VM quotas - vmdoc.root.each_element("TEMPLATE/CPU") { |e| - cpu_used += e.text.to_f - } - - vmdoc.root.each_element("TEMPLATE/MEMORY") { |e| - mem_used += e.text.to_i - } - - vmdoc.root.each_element("TEMPLATE/DISK") { |e| - type = "" - - e.each_element("TYPE") { |t_elem| - type = t_elem.text.upcase - } - - if ( type == "SWAP" || type == "FS") - e.each_element("SIZE") { |size_elem| - vol_used += size_elem.text.to_i - } - end - } - - vms_used += 1 - end - - if (vms_used != 0 || - cpu_limit != "-1" || mem_limit != "-1" || vms_limit != "-1" || vol_limit != "-1" ) - - # VM quotas - vm_elem = vm_quota.add_element("VM") - - vm_elem.add_element("CPU").text = cpu_limit - vm_elem.add_element("CPU_USED").text = sprintf('%.2f', cpu_used) - - vm_elem.add_element("MEMORY").text = mem_limit - vm_elem.add_element("MEMORY_USED").text = mem_used.to_s - - vm_elem.add_element("VMS").text = vms_limit - vm_elem.add_element("VMS_USED").text = vms_used.to_s - - vm_elem.add_element("VOLATILE_SIZE").text = vol_limit - vm_elem.add_element("VOLATILE_SIZE_USED").text = vol_used.to_s - end - end - - def update_history(history_elem) - hid = nil - - history_elem.each_element("HID") do |e| - hid = e.text - end - - new_elem = history_elem.add_element("CID") - new_elem.text = "-1" # Cluster None - - if hid.nil? 
- return - end - - @db.fetch("SELECT cid FROM host_pool WHERE oid = #{hid}") do |row| - new_elem.text = row[:cid].to_s - end - end - -end diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index aeb8964b15..5fb7012365 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -19,11 +19,26 @@ include REXML require 'ipaddr' require 'set' +require 'nokogiri' + module OneDBFsck VERSION = "4.5.0" + LOCAL_VERSION = "4.5.0" - def db_version - VERSION + def check_db_version() + db_version = read_db_version() + + if ( db_version[:version] != VERSION || + db_version[:local_version] != LOCAL_VERSION ) + + raise <<-EOT +Version mismatch: fsck file is for version +Shared: #{VERSION}, Local: #{LOCAL_VERSION} + +Current database is version +Shared: #{db_version[:version]}, Local: #{db_version[:local_version]} +EOT + end end def one_version @@ -102,10 +117,12 @@ module OneDBFsck # VNET/GNAME ######################################################################## + init_log_time() @errors = 0 puts + db_version = read_db_version() ######################################################################## # pool_control @@ -113,7 +130,9 @@ module OneDBFsck tables = ["group_pool", "user_pool", "acl", "image_pool", "host_pool", "network_pool", "template_pool", "vm_pool", "cluster_pool", - "datastore_pool", "document_pool"] + "datastore_pool", "document_pool", "zone_pool"] + + federated_tables = ["group_pool", "user_pool", "acl", "zone_pool"] tables.each do |table| max_oid = -1 @@ -142,7 +161,11 @@ module OneDBFsck log_error("pool_control for table #{table} has last_oid #{control_oid}, but it is #{max_oid}") if control_oid != -1 - @db.run("UPDATE pool_control SET last_oid=#{max_oid} WHERE tablename='#{table}'") + if db_version[:is_slave] && federated_tables.include?(table) + log_error("^ Needs to be fixed in the master OpenNebula") + else + @db.run("UPDATE pool_control SET last_oid=#{max_oid} WHERE tablename='#{table}'") + end else @db[:pool_control].insert( :tablename => table, @@ 
-151,6 +174,7 @@ module OneDBFsck end end + log_time() ######################################################################## # Groups @@ -239,48 +263,65 @@ module OneDBFsck end end - users_fix.each do |id, user| - @db[:user_pool].where(:oid => id).update( - :body => user[:body], - :gid => user[:gid]) + if db_version[:is_slave] + log_error("^ User errors need to be fixed in the master OpenNebula") + else + @db.transaction do + users_fix.each do |id, user| + @db[:user_pool].where(:oid => id).update( + :body => user[:body], + :gid => user[:gid]) + end + end end + log_time() + if !db_version[:is_slave] + @db.run "CREATE TABLE group_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + end - @db.run "CREATE TABLE group_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + @db.transaction do + @db.fetch("SELECT * from group_pool") do |row| + gid = row[:oid] + doc = Document.new(row[:body]) - @db.fetch("SELECT * from group_pool") do |row| - gid = row[:oid] - doc = Document.new(row[:body]) + users_elem = doc.root.elements.delete("USERS") - users_elem = doc.root.elements.delete("USERS") + users_new_elem = doc.root.add_element("USERS") - users_new_elem = doc.root.add_element("USERS") + group[gid].each do |id| + id_elem = users_elem.elements.delete("ID[.=#{id}]") - group[gid].each do |id| - id_elem = users_elem.elements.delete("ID[.=#{id}]") + if id_elem.nil? + log_error("User #{id} is missing from Group #{gid} users id list") + end - if id_elem.nil? 
- log_error("User #{id} is missing from Group #{gid} users id list") + users_new_elem.add_element("ID").text = id.to_s end - users_new_elem.add_element("ID").text = id.to_s + users_elem.each_element("ID") do |id_elem| + log_error("User #{id_elem.text} is in Group #{gid} users id list, but it should not") + end + + row[:body] = doc.to_s + + if db_version[:is_slave] + log_error("^ Group errors need to be fixed in the master OpenNebula") + else + # commit + @db[:group_pool_new].insert(row) + end end - - users_elem.each_element("ID") do |id_elem| - log_error("User #{id_elem.text} is in Group #{gid} users id list, but it should not") - end - - row[:body] = doc.to_s - - # commit - @db[:group_pool_new].insert(row) end - # Rename table - @db.run("DROP TABLE group_pool") - @db.run("ALTER TABLE group_pool_new RENAME TO group_pool") + if !db_version[:is_slave] + # Rename table + @db.run("DROP TABLE group_pool") + @db.run("ALTER TABLE group_pool_new RENAME TO group_pool") + end + log_time() ######################################################################## # Clusters @@ -324,250 +365,259 @@ module OneDBFsck datastores_fix = {} vnets_fix = {} - @db.fetch("SELECT oid,body,cid FROM host_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT oid,body,cid FROM host_pool") do |row| + doc = Document.new(row[:body]) - cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i - cluster_name = doc.root.get_text('CLUSTER') + cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i + cluster_name = doc.root.get_text('CLUSTER') - if cluster_id != row[:cid] - log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") - hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} - end + if cluster_id != row[:cid] + log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") + hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + end - if cluster_id != -1 - 
cluster_entry = cluster[cluster_id] + if cluster_id != -1 + cluster_entry = cluster[cluster_id] - if cluster_entry.nil? - log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + if cluster_entry.nil? + log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end - - doc.root.each_element('CLUSTER') do |e| - e.text = "" - end - - hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - else - if cluster_name != cluster_entry[:name] - log_error("Host #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}") - - doc.root.each_element('CLUSTER') do |e| - e.text = cluster_entry[:name] + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" end - hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} - end + doc.root.each_element('CLUSTER') do |e| + e.text = "" + end - cluster_entry[:hosts] << row[:oid] - end - end - end - - hosts_fix.each do |id, entry| - @db[:host_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) - end - - - @db.fetch("SELECT oid,body,cid FROM datastore_pool") do |row| - doc = Document.new(row[:body]) - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i - cluster_name = doc.root.get_text('CLUSTER') - - if cluster_id != row[:cid] - log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") - hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} - end - - if cluster_id != -1 - cluster_entry = cluster[cluster_id] - - if cluster_entry.nil? 
- log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") - - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end - - doc.root.each_element('CLUSTER') do |e| - e.text = "" - end - - datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - else - if doc.root.get_text('TYPE').to_s != "1" - cluster_entry[:datastores] << row[:oid] + hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} else - if cluster_entry[:system_ds] == 0 - cluster_entry[:datastores] << row[:oid] - cluster_entry[:system_ds] = row[:oid] - else - log_error("System Datastore #{row[:oid]} is in Cluster #{cluster_id}, but it already contains System Datastore #{cluster_entry[:system_ds]}") - - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end + if cluster_name != cluster_entry[:name] + log_error("Host #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}") doc.root.each_element('CLUSTER') do |e| - e.text = "" + e.text = cluster_entry[:name] end - datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - - next - end - end - - if cluster_name != cluster_entry[:name] - log_error("Datastore #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. 
It will be changed to #{cluster_entry[:name]}") - - doc.root.each_element('CLUSTER') do |e| - e.text = cluster_entry[:name] + hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} end - datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} + cluster_entry[:hosts] << row[:oid] end end end - end - datastores_fix.each do |id, entry| - @db[:datastore_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) - end - - - @db.fetch("SELECT oid,body,cid FROM network_pool") do |row| - doc = Document.new(row[:body]) - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i - cluster_name = doc.root.get_text('CLUSTER') - - if cluster_id != row[:cid] - log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") - hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + hosts_fix.each do |id, entry| + @db[:host_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) end - if cluster_id != -1 - cluster_entry = cluster[cluster_id] + log_time() - if cluster_entry.nil? - log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + @db.fetch("SELECT oid,body,cid FROM datastore_pool") do |row| + doc = Document.new(row[:body]) - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end + cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i + cluster_name = doc.root.get_text('CLUSTER') - doc.root.each_element('CLUSTER') do |e| - e.text = "" - end + if cluster_id != row[:cid] + log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") + hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + end - vnets_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - else - if cluster_name != cluster_entry[:name] - log_error("VNet #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. 
It will be changed to #{cluster_entry[:name]}") + if cluster_id != -1 + cluster_entry = cluster[cluster_id] + + if cluster_entry.nil? + log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" + end doc.root.each_element('CLUSTER') do |e| - e.text = cluster_entry[:name] + e.text = "" + end + + datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} + else + if doc.root.get_text('TYPE').to_s != "1" + cluster_entry[:datastores] << row[:oid] + else + if cluster_entry[:system_ds] == 0 + cluster_entry[:datastores] << row[:oid] + cluster_entry[:system_ds] = row[:oid] + else + log_error("System Datastore #{row[:oid]} is in Cluster #{cluster_id}, but it already contains System Datastore #{cluster_entry[:system_ds]}") + + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" + end + + doc.root.each_element('CLUSTER') do |e| + e.text = "" + end + + datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} + + next + end + end + + if cluster_name != cluster_entry[:name] + log_error("Datastore #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. 
It will be changed to #{cluster_entry[:name]}") + + doc.root.each_element('CLUSTER') do |e| + e.text = cluster_entry[:name] + end + + datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} + end + end + end + end + + datastores_fix.each do |id, entry| + @db[:datastore_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) + end + + log_time() + + @db.fetch("SELECT oid,body,cid FROM network_pool") do |row| + doc = Document.new(row[:body]) + + cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i + cluster_name = doc.root.get_text('CLUSTER') + + if cluster_id != row[:cid] + log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") + hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + end + + if cluster_id != -1 + cluster_entry = cluster[cluster_id] + + if cluster_entry.nil? + log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" + end + + doc.root.each_element('CLUSTER') do |e| + e.text = "" end vnets_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - end + else + if cluster_name != cluster_entry[:name] + log_error("VNet #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. 
It will be changed to #{cluster_entry[:name]}") - cluster_entry[:vnets] << row[:oid] + doc.root.each_element('CLUSTER') do |e| + e.text = cluster_entry[:name] + end + + vnets_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} + end + + cluster_entry[:vnets] << row[:oid] + end end end + + vnets_fix.each do |id, entry| + @db[:network_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) + end end - vnets_fix.each do |id, entry| - @db[:network_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) - end - + log_time() @db.run "CREATE TABLE cluster_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * from cluster_pool") do |row| - cluster_id = row[:oid] - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * from cluster_pool") do |row| + cluster_id = row[:oid] + doc = Document.new(row[:body]) - # Hosts - hosts_elem = doc.root.elements.delete("HOSTS") + # Hosts + hosts_elem = doc.root.elements.delete("HOSTS") - hosts_new_elem = doc.root.add_element("HOSTS") + hosts_new_elem = doc.root.add_element("HOSTS") - cluster[cluster_id][:hosts].each do |id| - id_elem = hosts_elem.elements.delete("ID[.=#{id}]") + cluster[cluster_id][:hosts].each do |id| + id_elem = hosts_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("Host #{id} is missing from Cluster #{cluster_id} host id list") + if id_elem.nil? 
+ log_error("Host #{id} is missing from Cluster #{cluster_id} host id list") + end + + hosts_new_elem.add_element("ID").text = id.to_s end - hosts_new_elem.add_element("ID").text = id.to_s - end - - hosts_elem.each_element("ID") do |id_elem| - log_error("Host #{id_elem.text} is in Cluster #{cluster_id} host id list, but it should not") - end - - - # Datastores - ds_elem = doc.root.elements.delete("DATASTORES") - - ds_new_elem = doc.root.add_element("DATASTORES") - - doc.root.each_element("SYSTEM_DS") do |e| - system_ds = e.text.to_i - - if system_ds != cluster[cluster_id][:system_ds] - log_error("Cluster #{cluster_id} has System Datastore set to #{system_ds}, but it should be #{cluster[cluster_id][:system_ds]}") - - e.text = cluster[cluster_id][:system_ds].to_s - end - end - - cluster[cluster_id][:datastores].each do |id| - id_elem = ds_elem.elements.delete("ID[.=#{id}]") - - if id_elem.nil? - log_error("Datastore #{id} is missing from Cluster #{cluster_id} datastore id list") + hosts_elem.each_element("ID") do |id_elem| + log_error("Host #{id_elem.text} is in Cluster #{cluster_id} host id list, but it should not") end - ds_new_elem.add_element("ID").text = id.to_s - end - ds_elem.each_element("ID") do |id_elem| - log_error("Datastore #{id_elem.text} is in Cluster #{cluster_id} datastore id list, but it should not") - end + # Datastores + ds_elem = doc.root.elements.delete("DATASTORES") + ds_new_elem = doc.root.add_element("DATASTORES") - # VNets - vnets_elem = doc.root.elements.delete("VNETS") + doc.root.each_element("SYSTEM_DS") do |e| + system_ds = e.text.to_i - vnets_new_elem = doc.root.add_element("VNETS") + if system_ds != cluster[cluster_id][:system_ds] + log_error("Cluster #{cluster_id} has System Datastore set to #{system_ds}, but it should be #{cluster[cluster_id][:system_ds]}") - cluster[cluster_id][:vnets].each do |id| - id_elem = vnets_elem.elements.delete("ID[.=#{id}]") - - if id_elem.nil? 
- log_error("VNet #{id} is missing from Cluster #{cluster_id} vnet id list") + e.text = cluster[cluster_id][:system_ds].to_s + end end - vnets_new_elem.add_element("ID").text = id.to_s + cluster[cluster_id][:datastores].each do |id| + id_elem = ds_elem.elements.delete("ID[.=#{id}]") + + if id_elem.nil? + log_error("Datastore #{id} is missing from Cluster #{cluster_id} datastore id list") + end + + ds_new_elem.add_element("ID").text = id.to_s + end + + ds_elem.each_element("ID") do |id_elem| + log_error("Datastore #{id_elem.text} is in Cluster #{cluster_id} datastore id list, but it should not") + end + + + # VNets + vnets_elem = doc.root.elements.delete("VNETS") + + vnets_new_elem = doc.root.add_element("VNETS") + + cluster[cluster_id][:vnets].each do |id| + id_elem = vnets_elem.elements.delete("ID[.=#{id}]") + + if id_elem.nil? + log_error("VNet #{id} is missing from Cluster #{cluster_id} vnet id list") + end + + vnets_new_elem.add_element("ID").text = id.to_s + end + + vnets_elem.each_element("ID") do |id_elem| + log_error("VNet #{id_elem.text} is in Cluster #{cluster_id} vnet id list, but it should not") + end + + + row[:body] = doc.to_s + + # commit + @db[:cluster_pool_new].insert(row) end - - vnets_elem.each_element("ID") do |id_elem| - log_error("VNet #{id_elem.text} is in Cluster #{cluster_id} vnet id list, but it should not") - end - - - row[:body] = doc.to_s - - # commit - @db[:cluster_pool_new].insert(row) end + log_time() + # Rename table @db.run("DROP TABLE cluster_pool") @db.run("ALTER TABLE cluster_pool_new RENAME TO cluster_pool") @@ -633,46 +683,57 @@ module OneDBFsck end end - images_fix.each do |id, body| - @db[:image_pool].where(:oid => id).update(:body => body) + @db.transaction do + images_fix.each do |id, body| + @db[:image_pool].where(:oid => id).update(:body => body) + end end + log_time() @db.run "CREATE TABLE datastore_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u 
INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * from datastore_pool") do |row| - ds_id = row[:oid] - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * from datastore_pool") do |row| + ds_id = row[:oid] + doc = Document.new(row[:body]) - images_elem = doc.root.elements.delete("IMAGES") + images_elem = doc.root.elements.delete("IMAGES") - images_new_elem = doc.root.add_element("IMAGES") + images_new_elem = doc.root.add_element("IMAGES") - datastore[ds_id][:images].each do |id| - id_elem = images_elem.elements.delete("ID[.=#{id}]") + datastore[ds_id][:images].each do |id| + id_elem = images_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("Image #{id} is missing from Datastore #{ds_id} image id list") + if id_elem.nil? + log_error( + "Image #{id} is missing from Datastore #{ds_id} "<< + "image id list") + end + + images_new_elem.add_element("ID").text = id.to_s end - images_new_elem.add_element("ID").text = id.to_s + images_elem.each_element("ID") do |id_elem| + log_error( + "Image #{id_elem.text} is in Cluster #{ds_id} "<< + "image id list, but it should not") + end + + + row[:body] = doc.to_s + + # commit + @db[:datastore_pool_new].insert(row) end - - images_elem.each_element("ID") do |id_elem| - log_error("Image #{id_elem.text} is in Cluster #{ds_id} image id list, but it should not") - end - - - row[:body] = doc.to_s - - # commit - @db[:datastore_pool_new].insert(row) end # Rename table @db.run("DROP TABLE datastore_pool") @db.run("ALTER TABLE datastore_pool_new RENAME TO datastore_pool") + log_time() + ######################################################################## # VM Counters for host, image and vnet usage ######################################################################## @@ -692,6 +753,8 @@ module OneDBFsck } end + log_time() + # Init image counters @db.fetch("SELECT oid,body FROM image_pool") do |row| if counters[:image][row[:oid]].nil? 
@@ -717,6 +780,8 @@ module OneDBFsck end end + log_time() + # Init vnet counters @db.fetch("SELECT oid,body FROM network_pool") do |row| doc = Document.new(row[:body]) @@ -728,43 +793,46 @@ module OneDBFsck } end + log_time() + vms_fix = {} # Aggregate information of the RUNNING vms @db.fetch("SELECT oid,body FROM vm_pool WHERE state<>6") do |row| - vm_doc = Document.new(row[:body]) - - state = vm_doc.root.get_text('STATE').to_s.to_i - lcm_state = vm_doc.root.get_text('LCM_STATE').to_s.to_i + vm_doc = Nokogiri::XML(row[:body]) + state = vm_doc.root.at_xpath('STATE').text.to_i + lcm_state = vm_doc.root.at_xpath('LCM_STATE').text.to_i # Images used by this VM - vm_doc.root.each_element("TEMPLATE/DISK/IMAGE_ID") do |e| + vm_doc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each do |e| img_id = e.text.to_i if counters[:image][img_id].nil? - log_error("VM #{row[:oid]} is using Image #{img_id}, but it does not exist") + log_error("VM #{row[:oid]} is using Image #{img_id}, but "<< + "it does not exist") else counters[:image][img_id][:vms].add(row[:oid]) end end # VNets used by this VM - vm_doc.root.each_element("TEMPLATE/NIC") do |e| + vm_doc.root.xpath("TEMPLATE/NIC").each do |e| net_id = nil - e.each_element("NETWORK_ID") do |nid| + e.xpath("NETWORK_ID").each do |nid| net_id = nid.text.to_i end if !net_id.nil? if counters[:vnet][net_id].nil? - log_error("VM #{row[:oid]} is using VNet #{net_id}, but it does not exist") + log_error("VM #{row[:oid]} is using VNet #{net_id}, "<< + "but it does not exist") else - counters[:vnet][net_id][:leases][e.get_text('IP').to_s] = + counters[:vnet][net_id][:leases][e.at_xpath('IP').text] = [ - e.get_text('MAC').to_s, # MAC + e.at_xpath('MAC').text, # MAC "1", # USED - vm_doc.root.get_text('ID').to_s.to_i # VID + vm_doc.root.at_xpath('ID').text.to_i # VID ] end end @@ -777,41 +845,39 @@ module OneDBFsck next if !([3,5,8].include? 
state) # Get memory (integer) - memory = 0 - vm_doc.root.each_element("TEMPLATE/MEMORY") { |e| - memory = e.text.to_i - } + memory = vm_doc.root.at_xpath("TEMPLATE/MEMORY").text.to_i # Get CPU (float) - cpu = 0 - vm_doc.root.each_element("TEMPLATE/CPU") { |e| - cpu = e.text.to_f - } + cpu = vm_doc.root.at_xpath("TEMPLATE/CPU").text.to_f # Get hostid, hostname hid = -1 - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HID") { |e| + vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HID").each { |e| hid = e.text.to_i } hostname = "" - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME") { |e| + vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME").each { |e| hostname = e.text } counters_host = counters[:host][hid] if counters_host.nil? - log_error("VM #{row[:oid]} is using Host #{hid}, but it does not exist") + log_error("VM #{row[:oid]} is using Host #{hid}, "<< + "but it does not exist") else if counters_host[:name] != hostname - log_error("VM #{row[:oid]} has a wrong hostname for Host #{hid}, #{hostname}. It will be changed to #{counters_host[:name]}") + log_error("VM #{row[:oid]} has a wrong hostname for "<< + "Host #{hid}, #{hostname}. 
It will be changed to "<< + "#{counters_host[:name]}") - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME") { |e| - e.text = counters_host[:name] + vm_doc.root.xpath( + "HISTORY_RECORDS/HISTORY[last()]/HOSTNAME").each { |e| + e.content = counters_host[:name] } - vms_fix[row[:oid]] = vm_doc.to_s + vms_fix[row[:oid]] = vm_doc.root.to_s end counters_host[:memory] += memory @@ -820,10 +886,13 @@ module OneDBFsck end end - vms_fix.each do |id, body| - @db[:vm_pool].where(:oid => id).update(:body => body) + @db.transaction do + vms_fix.each do |id, body| + @db[:vm_pool].where(:oid => id).update(:body => body) + end end + log_time() ######################################################################## # Hosts @@ -842,72 +911,82 @@ module OneDBFsck "cid INTEGER, UNIQUE(name));" # Calculate the host's xml and write them to host_pool_new - @db[:host_pool].each do |row| - host_doc = Document.new(row[:body]) + @db.transaction do + @db[:host_pool].each do |row| + host_doc = Document.new(row[:body]) - hid = row[:oid] + hid = row[:oid] - counters_host = counters[:host][hid] + counters_host = counters[:host][hid] - rvms = counters_host[:rvms].size - cpu_usage = (counters_host[:cpu]*100).to_i - mem_usage = counters_host[:memory]*1024 + rvms = counters_host[:rvms].size + cpu_usage = (counters_host[:cpu]*100).to_i + mem_usage = counters_host[:memory]*1024 - # rewrite running_vms - host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| - if e.text != rvms.to_s - log_error("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms - end - } + # rewrite running_vms + host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| + if e.text != rvms.to_s + log_error( + "Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } - # re-do list of VM IDs - vms_elem = host_doc.root.elements.delete("VMS") + # re-do list of VM IDs + vms_elem = host_doc.root.elements.delete("VMS") - vms_new_elem = host_doc.root.add_element("VMS") + 
vms_new_elem = host_doc.root.add_element("VMS") - counters_host[:rvms].each do |id| - id_elem = vms_elem.elements.delete("ID[.=#{id}]") + counters_host[:rvms].each do |id| + id_elem = vms_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("VM #{id} is missing from Host #{hid} VM id list") + if id_elem.nil? + log_error( + "VM #{id} is missing from Host #{hid} VM id list") + end + + vms_new_elem.add_element("ID").text = id.to_s end - vms_new_elem.add_element("ID").text = id.to_s + vms_elem.each_element("ID") do |id_elem| + log_error( + "VM #{id_elem.text} is in Host #{hid} VM id list, "<< + "but it should not") + end + + + # rewrite cpu + host_doc.root.each_element("HOST_SHARE/CPU_USAGE") {|e| + if e.text != cpu_usage.to_s + log_error( + "Host #{hid} CPU_USAGE has #{e.text} "<< + "\tis\t#{cpu_usage}") + e.text = cpu_usage + end + } + + # rewrite memory + host_doc.root.each_element("HOST_SHARE/MEM_USAGE") {|e| + if e.text != mem_usage.to_s + log_error("Host #{hid} MEM_USAGE has #{e.text} "<< + "\tis\t#{mem_usage}") + e.text = mem_usage + end + } + + row[:body] = host_doc.to_s + + # commit + @db[:host_pool_new].insert(row) end - - vms_elem.each_element("ID") do |id_elem| - log_error("VM #{id_elem.text} is in Host #{hid} VM id list, but it should not") - end - - - # rewrite cpu - host_doc.root.each_element("HOST_SHARE/CPU_USAGE") {|e| - if e.text != cpu_usage.to_s - log_error("Host #{hid} CPU_USAGE has #{e.text} \tis\t#{cpu_usage}") - e.text = cpu_usage - end - } - - # rewrite memory - host_doc.root.each_element("HOST_SHARE/MEM_USAGE") {|e| - if e.text != mem_usage.to_s - log_error("Host #{hid} MEM_USAGE has #{e.text} \tis\t#{mem_usage}") - e.text = mem_usage - end - } - - row[:body] = host_doc.to_s - - # commit - @db[:host_pool_new].insert(row) end # Rename table @db.run("DROP TABLE host_pool") @db.run("ALTER TABLE host_pool_new RENAME TO host_pool") + log_time() ######################################################################## # Image @@ -926,122 
+1005,124 @@ module OneDBFsck # Create a new empty table where we will store the new calculated values @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" - # Calculate the host's xml and write them to host_pool_new - @db[:image_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:image_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - persistent = ( doc.root.get_text('PERSISTENT').to_s == "1" ) - current_state = doc.root.get_text('STATE').to_s.to_i + persistent = ( doc.root.get_text('PERSISTENT').to_s == "1" ) + current_state = doc.root.get_text('STATE').to_s.to_i - rvms = counters[:image][oid][:vms].size - n_cloning_ops = counters[:image][oid][:clones].size + rvms = counters[:image][oid][:vms].size + n_cloning_ops = counters[:image][oid][:clones].size - # rewrite running_vms - doc.root.each_element("RUNNING_VMS") {|e| - if e.text != rvms.to_s - log_error("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms - end - } + # rewrite running_vms + doc.root.each_element("RUNNING_VMS") {|e| + if e.text != rvms.to_s + log_error("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } - # re-do list of VM IDs - vms_elem = doc.root.elements.delete("VMS") + # re-do list of VM IDs + vms_elem = doc.root.elements.delete("VMS") - vms_new_elem = doc.root.add_element("VMS") + vms_new_elem = doc.root.add_element("VMS") - counters[:image][oid][:vms].each do |id| - id_elem = vms_elem.elements.delete("ID[.=#{id}]") + counters[:image][oid][:vms].each do |id| + id_elem = vms_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("VM #{id} is missing from Image #{oid} VM id list") + if id_elem.nil? 
+ log_error("VM #{id} is missing from Image #{oid} VM id list") + end + + vms_new_elem.add_element("ID").text = id.to_s end - vms_new_elem.add_element("ID").text = id.to_s + vms_elem.each_element("ID") do |id_elem| + log_error("VM #{id_elem.text} is in Image #{oid} VM id list, but it should not") + end + + + if ( persistent && rvms > 0 ) + n_cloning_ops = 0 + counters[:image][oid][:clones] = Set.new + end + + # Check number of clones + doc.root.each_element("CLONING_OPS") { |e| + if e.text != n_cloning_ops.to_s + log_error("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") + e.text = n_cloning_ops + end + } + + # re-do list of Images cloning this one + clones_elem = doc.root.elements.delete("CLONES") + + clones_new_elem = doc.root.add_element("CLONES") + + counters[:image][oid][:clones].each do |id| + id_elem = clones_elem.elements.delete("ID[.=#{id}]") + + if id_elem.nil? + log_error("Image #{id} is missing from Image #{oid} CLONES id list") + end + + clones_new_elem.add_element("ID").text = id.to_s + end + + clones_elem.each_element("ID") do |id_elem| + log_error("Image #{id_elem.text} is in Image #{oid} CLONES id list, but it should not") + end + + + # Check state + + state = current_state + + if persistent + if ( rvms > 0 ) + state = 8 # USED_PERS + elsif ( n_cloning_ops > 0 ) + state = 6 # CLONE + elsif ( current_state == 8 || current_state == 6 ) + # rvms == 0 && n_cloning_ops == 0, but image is in state + # USED_PERS or CLONE + + state = 1 # READY + end + else + if ( rvms > 0 || n_cloning_ops > 0 ) + state = 2 # USED + elsif ( current_state == 2 ) + # rvms == 0 && n_cloning_ops == 0, but image is in state + # USED + + state = 1 # READY + end + end + + doc.root.each_element("STATE") { |e| + if e.text != state.to_s + log_error("Image #{oid} has STATE #{IMAGE_STATES[e.text.to_i]} \tis\t#{IMAGE_STATES[state]}") + e.text = state + end + } + + row[:body] = doc.to_s + + # commit + @db[:image_pool_new].insert(row) end - - vms_elem.each_element("ID") do 
|id_elem| - log_error("VM #{id_elem.text} is in Image #{oid} VM id list, but it should not") - end - - - if ( persistent && rvms > 0 ) - n_cloning_ops = 0 - counters[:image][oid][:clones] = Set.new - end - - # Check number of clones - doc.root.each_element("CLONING_OPS") { |e| - if e.text != n_cloning_ops.to_s - log_error("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") - e.text = n_cloning_ops - end - } - - # re-do list of Images cloning this one - clones_elem = doc.root.elements.delete("CLONES") - - clones_new_elem = doc.root.add_element("CLONES") - - counters[:image][oid][:clones].each do |id| - id_elem = clones_elem.elements.delete("ID[.=#{id}]") - - if id_elem.nil? - log_error("Image #{id} is missing from Image #{oid} CLONES id list") - end - - clones_new_elem.add_element("ID").text = id.to_s - end - - clones_elem.each_element("ID") do |id_elem| - log_error("Image #{id_elem.text} is in Image #{oid} CLONES id list, but it should not") - end - - - # Check state - - state = current_state - - if persistent - if ( rvms > 0 ) - state = 8 # USED_PERS - elsif ( n_cloning_ops > 0 ) - state = 6 # CLONE - elsif ( current_state == 8 || current_state == 6 ) - # rvms == 0 && n_cloning_ops == 0, but image is in state - # USED_PERS or CLONE - - state = 1 # READY - end - else - if ( rvms > 0 || n_cloning_ops > 0 ) - state = 2 # USED - elsif ( current_state == 2 ) - # rvms == 0 && n_cloning_ops == 0, but image is in state - # USED - - state = 1 # READY - end - end - - doc.root.each_element("STATE") { |e| - if e.text != state.to_s - log_error("Image #{oid} has STATE #{IMAGE_STATES[e.text.to_i]} \tis\t#{IMAGE_STATES[state]}") - e.text = state - end - } - - row[:body] = doc.to_s - - # commit - @db[:image_pool_new].insert(row) end # Rename table @db.run("DROP TABLE image_pool") @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") + log_time() ######################################################################## # VNet @@ -1051,107 +1132,111 @@ module 
OneDBFsck @db.run "CREATE TABLE leases_new (oid INTEGER, ip BIGINT, body MEDIUMTEXT, PRIMARY KEY(oid,ip));" - @db[:leases].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:leases].each do |row| + doc = Nokogiri::XML(row[:body]) - used = (doc.root.get_text('USED') == "1") - vid = doc.root.get_text('VID').to_s.to_i + used = (doc.root.at_xpath('USED').text == "1") + vid = doc.root.at_xpath('VID').text.to_i - ip_str = IPAddr.new(row[:ip], Socket::AF_INET).to_s + ip_str = IPAddr.new(row[:ip], Socket::AF_INET).to_s - vnet_structure = counters[:vnet][row[:oid]] + vnet_structure = counters[:vnet][row[:oid]] - if vnet_structure.nil? - log_error("Table leases contains the lease #{ip_str} for VNet #{row[:oid]}, but it does not exit") + if vnet_structure.nil? + log_error("Table leases contains the lease #{ip_str} "<< + "for VNet #{row[:oid]}, but it does not exit") - next - end - - ranged = vnet_structure[:type] == 0 - - counter_mac, counter_used, counter_vid = - vnet_structure[:leases][ip_str] - - vnet_structure[:leases].delete(ip_str) - - insert = true - - if used && (vid != -1) # Lease used by a VM - if counter_mac.nil? - log_error("VNet #{row[:oid]} has used lease #{ip_str} (VM #{vid}) \tbut it is free") - - if ranged - insert = false - end - - doc.root.each_element("USED") { |e| - e.text = "0" - } - - doc.root.each_element("VID") {|e| - e.text = "-1" - } - - row[:body] = doc.to_s - - elsif vid != counter_vid - log_error("VNet #{row[:oid]} has used lease #{ip_str} (VM #{vid}) \tbut it used by VM #{counter_vid}") - - doc.root.each_element("VID") {|e| - e.text = counter_vid.to_s - } - - row[:body] = doc.to_s + next end - else # Lease is free or on hold (used=1, vid=-1) - if !counter_mac.nil? 
- if used - log_error("VNet #{row[:oid]} has lease on hold #{ip_str} \tbut it is used by VM #{counter_vid}") - else - log_error("VNet #{row[:oid]} has free lease #{ip_str} \tbut it is used by VM #{counter_vid}") + + ranged = vnet_structure[:type] == 0 + + counter_mac, counter_used, counter_vid = + vnet_structure[:leases][ip_str] + + vnet_structure[:leases].delete(ip_str) + + insert = true + + if used && (vid != -1) # Lease used by a VM + if counter_mac.nil? + log_error( + "VNet #{row[:oid]} has used lease #{ip_str} "<< + "(VM #{vid}) \tbut it is free") + + if ranged + insert = false + end + + doc.root.at_xpath("USED").content = "0" + + doc.root.at_xpath("VID").content = "-1" + + row[:body] = doc.root.to_s + + elsif vid != counter_vid + log_error( + "VNet #{row[:oid]} has used lease #{ip_str} "<< + "(VM #{vid}) \tbut it used by VM #{counter_vid}") + + doc.root.at_xpath("VID").content = counter_vid.to_s + + row[:body] = doc.root.to_s end + else # Lease is free or on hold (used=1, vid=-1) + if !counter_mac.nil? + if used + log_error( + "VNet #{row[:oid]} has lease on hold #{ip_str} "<< + "\tbut it is used by VM #{counter_vid}") + else + log_error( + "VNet #{row[:oid]} has free lease #{ip_str} "<< + "\tbut it is used by VM #{counter_vid}") + end - doc.root.each_element("USED") { |e| - e.text = "1" - } + doc.root.at_xpath("USED").content = "1" - doc.root.each_element("VID") {|e| - e.text = counter_vid.to_s - } + doc.root.at_xpath("VID").content = counter_vid.to_s - row[:body] = doc.to_s + row[:body] = doc.root.to_s + end end - end - if (doc.root.get_text('USED') == "1") - vnet_structure[:total_leases] += 1 - end + if (doc.root.at_xpath('USED').text == "1") + vnet_structure[:total_leases] += 1 + end - # commit - @db[:leases_new].insert(row) if insert + # commit + @db[:leases_new].insert(row) if insert + end end + log_time() + # Now insert all the leases left in the hash, i.e. used by a VM in # vm_pool, but not in the leases table. 
This will only happen in # ranged networks + @db.transaction do + counters[:vnet].each do |net_id,vnet_structure| + vnet_structure[:leases].each do |ip,array| + mac,used,vid = array - counters[:vnet].each do |net_id,vnet_structure| - vnet_structure[:leases].each do |ip,array| - mac,used,vid = array + ip_i = IPAddr.new(ip, Socket::AF_INET).to_i - ip_i = IPAddr.new(ip, Socket::AF_INET).to_i + # TODO: MAC_PREFIX is now hardcoded to "02:00" + body = "#{ip_i}512#{ip_i}#{used}#{vid}" - # TODO: MAC_PREFIX is now hardcoded to "02:00" - body = "#{ip_i}512#{ip_i}#{used}#{vid}" + log_error("VNet #{net_id} has free lease #{ip} \tbut it is used by VM #{vid}") - log_error("VNet #{net_id} has free lease #{ip} \tbut it is used by VM #{vid}") + vnet_structure[:total_leases] += 1 - vnet_structure[:total_leases] += 1 - - @db[:leases_new].insert( - :oid => net_id, - :ip => ip_i, - :body => body) + @db[:leases_new].insert( + :oid => net_id, + :ip => ip_i, + :body => body) + end end end @@ -1160,6 +1245,7 @@ module OneDBFsck @db.run("DROP TABLE leases") @db.run("ALTER TABLE leases_new RENAME TO leases") + log_time() ######################################################################## # VNet @@ -1170,31 +1256,34 @@ module OneDBFsck # Create a new empty table where we will store the new calculated values @db.run "CREATE TABLE network_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));" - @db[:network_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:network_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - total_leases = counters[:vnet][oid][:total_leases] + total_leases = counters[:vnet][oid][:total_leases] - # rewrite running_vms - doc.root.each_element("TOTAL_LEASES") {|e| - if e.text != total_leases.to_s - log_error("VNet #{oid} TOTAL_LEASES has #{e.text} \tis\t#{total_leases}") - 
e.text = total_leases - end - } + # rewrite running_vms + doc.root.each_element("TOTAL_LEASES") {|e| + if e.text != total_leases.to_s + log_error("VNet #{oid} TOTAL_LEASES has #{e.text} \tis\t#{total_leases}") + e.text = total_leases + end + } - row[:body] = doc.to_s + row[:body] = doc.to_s - # commit - @db[:network_pool_new].insert(row) + # commit + @db[:network_pool_new].insert(row) + end end # Rename table @db.run("DROP TABLE network_pool") @db.run("ALTER TABLE network_pool_new RENAME TO network_pool") + log_time() ######################################################################## # Users @@ -1202,32 +1291,47 @@ module OneDBFsck # USER QUOTAS ######################################################################## - @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" - @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + # This block is not needed for now +=begin + @db.transaction do + @db.fetch("SELECT oid FROM user_pool") do |row| + found = false - # oneadmin does not have quotas - @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| - @db[:user_pool].insert(row) + @db.fetch("SELECT user_oid FROM user_quotas WHERE user_oid=#{row[:oid]}") do |q_row| + found = true + end + + if !found + log_error("User #{row[:oid]} does not have a quotas entry") + + @db.run "INSERT INTO user_quotas VALUES(#{row[:oid]},'#{row[:oid]}');" + end + end + end +=end + @db.run "ALTER TABLE user_quotas RENAME TO old_user_quotas;" + @db.run "CREATE TABLE user_quotas (user_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_user_quotas WHERE user_oid=0") do |row| + @db[:user_quotas].insert(row) + end + + @db.fetch("SELECT * FROM old_user_quotas WHERE user_oid>0") do |row| + doc = Nokogiri::XML(row[:body]) + + calculate_quotas(doc, "uid=#{row[:user_oid]}", 
"User") + + @db[:user_quotas].insert( + :user_oid => row[:user_oid], + :body => doc.root.to_s) + end end - @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) - - calculate_quotas(doc, "uid=#{row[:oid]}", "User") - - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_user_pool;" + @db.run "DROP TABLE old_user_quotas;" + log_time() ######################################################################## # Groups @@ -1235,31 +1339,47 @@ module OneDBFsck # GROUP QUOTAS ######################################################################## - @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" - @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + # This block is not needed for now +=begin + @db.transaction do + @db.fetch("SELECT oid FROM group_pool") do |row| + found = false - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert(row) + @db.fetch("SELECT group_oid FROM group_quotas WHERE group_oid=#{row[:oid]}") do |q_row| + found = true + end + + if !found + log_error("Group #{row[:oid]} does not have a quotas entry") + + @db.run "INSERT INTO group_quotas VALUES(#{row[:oid]},'#{row[:oid]}');" + end + end + end +=end + @db.run "ALTER TABLE group_quotas RENAME TO old_group_quotas;" + @db.run "CREATE TABLE group_quotas (group_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_group_quotas WHERE group_oid=0") do |row| + @db[:group_quotas].insert(row) + end + + @db.fetch("SELECT * FROM old_group_quotas WHERE group_oid>0") do |row| + 
doc = Nokogiri::XML(row[:body]) + + calculate_quotas(doc, "gid=#{row[:group_oid]}", "Group") + + @db[:group_quotas].insert( + :group_oid => row[:group_oid], + :body => doc.root.to_s) + end end - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) + @db.run "DROP TABLE old_group_quotas;" - calculate_quotas(doc, "gid=#{row[:oid]}", "Group") - - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end - - @db.run "DROP TABLE old_group_pool;" + log_time() log_total_errors() @@ -1280,10 +1400,10 @@ module OneDBFsck def calculate_quotas(doc, where_filter, resource) - oid = doc.root.get_text("ID").to_s.to_i + oid = doc.root.at_xpath("ID").text.to_i # VM quotas - cpu_used = 0.0 + cpu_used = 0 mem_used = 0 vms_used = 0 vol_used = 0 @@ -1295,29 +1415,28 @@ module OneDBFsck img_usage = {} @db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row| - vmdoc = Document.new(vm_row[:body]) + vmdoc = Nokogiri::XML(vm_row[:body]) # VM quotas - vmdoc.root.each_element("TEMPLATE/CPU") { |e| + vmdoc.root.xpath("TEMPLATE/CPU").each { |e| # truncate to 2 decimals - cpu = (e.text.to_f * 100).to_i / 100.0 + cpu = (e.text.to_f * 100).to_i cpu_used += cpu - cpu_used = (cpu_used * 100).to_i / 100.0 } - vmdoc.root.each_element("TEMPLATE/MEMORY") { |e| + vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e| mem_used += e.text.to_i } - vmdoc.root.each_element("TEMPLATE/DISK") { |e| + vmdoc.root.xpath("TEMPLATE/DISK").each { |e| type = "" - e.each_element("TYPE") { |t_elem| + e.xpath("TYPE").each { |t_elem| type = t_elem.text.upcase } if ( type == "SWAP" || type == "FS") - e.each_element("SIZE") { |size_elem| + e.xpath("SIZE").each { |size_elem| vol_used += size_elem.text.to_i } end @@ -1326,13 +1445,13 @@ module OneDBFsck vms_used += 1 # VNet quotas - 
vmdoc.root.each_element("TEMPLATE/NIC/NETWORK_ID") { |e| + vmdoc.root.xpath("TEMPLATE/NIC/NETWORK_ID").each { |e| vnet_usage[e.text] = 0 if vnet_usage[e.text].nil? vnet_usage[e.text] += 1 } # Image quotas - vmdoc.root.each_element("TEMPLATE/DISK/IMAGE_ID") { |e| + vmdoc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each { |e| img_usage[e.text] = 0 if img_usage[e.text].nil? img_usage[e.text] += 1 } @@ -1342,29 +1461,29 @@ module OneDBFsck # VM quotas vm_elem = nil - doc.root.each_element("VM_QUOTA/VM") { |e| vm_elem = e } + doc.root.xpath("VM_QUOTA/VM").each { |e| vm_elem = e } if vm_elem.nil? - doc.root.delete_element("VM_QUOTA") + doc.root.xpath("VM_QUOTA").each { |e| e.remove } - vm_quota = doc.root.add_element("VM_QUOTA") - vm_elem = vm_quota.add_element("VM") + vm_quota = doc.root.add_child(doc.create_element("VM_QUOTA")) + vm_elem = vm_quota.add_child(doc.create_element("VM")) - vm_elem.add_element("CPU").text = "-1" - vm_elem.add_element("CPU_USED").text = "0" + vm_elem.add_child(doc.create_element("CPU")).content = "-1" + vm_elem.add_child(doc.create_element("CPU_USED")).content = "0" - vm_elem.add_element("MEMORY").text = "-1" - vm_elem.add_element("MEMORY_USED").text = "0" + vm_elem.add_child(doc.create_element("MEMORY")).content = "-1" + vm_elem.add_child(doc.create_element("MEMORY_USED")).content = "0" - vm_elem.add_element("VMS").text = "-1" - vm_elem.add_element("VMS_USED").text = "0" + vm_elem.add_child(doc.create_element("VMS")).content = "-1" + vm_elem.add_child(doc.create_element("VMS_USED")).content = "0" - vm_elem.add_element("VOLATILE_SIZE").text = "-1" - vm_elem.add_element("VOLATILE_SIZE_USED").text = "0" + vm_elem.add_child(doc.create_element("VOLATILE_SIZE")).content = "-1" + vm_elem.add_child(doc.create_element("VOLATILE_SIZE_USED")).content = "0" end - vm_elem.each_element("CPU_USED") { |e| + vm_elem.xpath("CPU_USED").each { |e| # Because of bug http://dev.opennebula.org/issues/1567 the element # may contain a float number in scientific notation. 
@@ -1372,6 +1491,8 @@ module OneDBFsck # Check if the float value or the string representation mismatch, # but ignoring the precision + cpu_used = (cpu_used / 100.0) + different = ( e.text.to_f != cpu_used || ![sprintf('%.2f', cpu_used), sprintf('%.1f', cpu_used), sprintf('%.0f', cpu_used)].include?(e.text) ) @@ -1379,51 +1500,51 @@ module OneDBFsck if different log_error("#{resource} #{oid} quotas: CPU_USED has #{e.text} \tis\t#{cpu_used_str}") - e.text = cpu_used_str + e.content = cpu_used_str end } - vm_elem.each_element("MEMORY_USED") { |e| + vm_elem.xpath("MEMORY_USED").each { |e| if e.text != mem_used.to_s log_error("#{resource} #{oid} quotas: MEMORY_USED has #{e.text} \tis\t#{mem_used}") - e.text = mem_used.to_s + e.content = mem_used.to_s end } - vm_elem.each_element("VMS_USED") { |e| + vm_elem.xpath("VMS_USED").each { |e| if e.text != vms_used.to_s log_error("#{resource} #{oid} quotas: VMS_USED has #{e.text} \tis\t#{vms_used}") - e.text = vms_used.to_s + e.content = vms_used.to_s end } - vm_elem.each_element("VOLATILE_SIZE_USED") { |e| + vm_elem.xpath("VOLATILE_SIZE_USED").each { |e| if e.text != vol_used.to_s log_error("#{resource} #{oid} quotas: VOLATILE_SIZE_USED has #{e.text} \tis\t#{vol_used}") - e.text = vol_used.to_s + e.content = vol_used.to_s end } # VNet quotas net_quota = nil - doc.root.each_element("NETWORK_QUOTA") { |e| net_quota = e } + doc.root.xpath("NETWORK_QUOTA").each { |e| net_quota = e } if net_quota.nil? - net_quota = doc.root.add_element("NETWORK_QUOTA") + net_quota = doc.root.add_child(doc.create_element("NETWORK_QUOTA")) end - net_quota.each_element("NETWORK") { |net_elem| - vnet_id = net_elem.get_text("ID").to_s + net_quota.xpath("NETWORK").each { |net_elem| + vnet_id = net_elem.at_xpath("ID").text leases_used = vnet_usage.delete(vnet_id) leases_used = 0 if leases_used.nil? 
- net_elem.each_element("LEASES_USED") { |e| + net_elem.xpath("LEASES_USED").each { |e| if e.text != leases_used.to_s log_error("#{resource} #{oid} quotas: VNet #{vnet_id}\tLEASES_USED has #{e.text} \tis\t#{leases_used}") - e.text = leases_used.to_s + e.content = leases_used.to_s end } } @@ -1431,34 +1552,34 @@ module OneDBFsck vnet_usage.each { |vnet_id, leases_used| log_error("#{resource} #{oid} quotas: VNet #{vnet_id}\tLEASES_USED has 0 \tis\t#{leases_used}") - new_elem = net_quota.add_element("NETWORK") + new_elem = net_quota.add_child(doc.create_element("NETWORK")) - new_elem.add_element("ID").text = vnet_id - new_elem.add_element("LEASES").text = "-1" - new_elem.add_element("LEASES_USED").text = leases_used.to_s + new_elem.add_child(doc.create_element("ID")).content = vnet_id + new_elem.add_child(doc.create_element("LEASES")).content = "-1" + new_elem.add_child(doc.create_element("LEASES_USED")).content = leases_used.to_s } # Image quotas img_quota = nil - doc.root.each_element("IMAGE_QUOTA") { |e| img_quota = e } + doc.root.xpath("IMAGE_QUOTA").each { |e| img_quota = e } if img_quota.nil? - img_quota = doc.root.add_element("IMAGE_QUOTA") + img_quota = doc.root.add_child(doc.create_element("IMAGE_QUOTA")) end - img_quota.each_element("IMAGE") { |img_elem| - img_id = img_elem.get_text("ID").to_s + img_quota.xpath("IMAGE").each { |img_elem| + img_id = img_elem.at_xpath("ID").text rvms = img_usage.delete(img_id) rvms = 0 if rvms.nil? 
- img_elem.each_element("RVMS_USED") { |e| + img_elem.xpath("RVMS_USED").each { |e| if e.text != rvms.to_s log_error("#{resource} #{oid} quotas: Image #{img_id}\tRVMS has #{e.text} \tis\t#{rvms}") - e.text = rvms.to_s + e.content = rvms.to_s end } } @@ -1466,11 +1587,11 @@ module OneDBFsck img_usage.each { |img_id, rvms| log_error("#{resource} #{oid} quotas: Image #{img_id}\tRVMS has 0 \tis\t#{rvms}") - new_elem = img_quota.add_element("IMAGE") + new_elem = img_quota.add_child(doc.create_element("IMAGE")) - new_elem.add_element("ID").text = img_id - new_elem.add_element("RVMS").text = "-1" - new_elem.add_element("RVMS_USED").text = rvms.to_s + new_elem.add_child(doc.create_element("ID")).content = img_id + new_elem.add_child(doc.create_element("RVMS")).content = "-1" + new_elem.add_child(doc.create_element("RVMS_USED")).content = rvms.to_s } @@ -1479,44 +1600,44 @@ module OneDBFsck ds_usage = {} @db.fetch("SELECT body FROM image_pool WHERE #{where_filter}") do |img_row| - img_doc = Document.new(img_row[:body]) + img_doc = Nokogiri::XML(img_row[:body]) - img_doc.root.each_element("DATASTORE_ID") { |e| + img_doc.root.xpath("DATASTORE_ID").each { |e| ds_usage[e.text] = [0,0] if ds_usage[e.text].nil? ds_usage[e.text][0] += 1 - img_doc.root.each_element("SIZE") { |size| + img_doc.root.xpath("SIZE").each { |size| ds_usage[e.text][1] += size.text.to_i } } end ds_quota = nil - doc.root.each_element("DATASTORE_QUOTA") { |e| ds_quota = e } + doc.root.xpath("DATASTORE_QUOTA").each { |e| ds_quota = e } if ds_quota.nil? - ds_quota = doc.root.add_element("DATASTORE_QUOTA") + ds_quota = doc.root.add_child(doc.create_element("DATASTORE_QUOTA")) end - ds_quota.each_element("DATASTORE") { |ds_elem| - ds_id = ds_elem.get_text("ID").to_s + ds_quota.xpath("DATASTORE").each { |ds_elem| + ds_id = ds_elem.at_xpath("ID").text images_used,size_used = ds_usage.delete(ds_id) images_used = 0 if images_used.nil? size_used = 0 if size_used.nil? 
- ds_elem.each_element("IMAGES_USED") { |e| + ds_elem.xpath("IMAGES_USED").each { |e| if e.text != images_used.to_s log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tIMAGES_USED has #{e.text} \tis\t#{images_used}") - e.text = images_used.to_s + e.content = images_used.to_s end } - ds_elem.each_element("SIZE_USED") { |e| + ds_elem.xpath("SIZE_USED").each { |e| if e.text != size_used.to_s log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tSIZE_USED has #{e.text} \tis\t#{size_used}") - e.text = size_used.to_s + e.content = size_used.to_s end } } @@ -1527,15 +1648,15 @@ module OneDBFsck log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tIMAGES_USED has 0 \tis\t#{images_used}") log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tSIZE_USED has 0 \tis\t#{size_used}") - new_elem = ds_quota.add_element("DATASTORE") + new_elem = ds_quota.add_child(doc.create_element("DATASTORE")) - new_elem.add_element("ID").text = ds_id + new_elem.add_child(doc.create_element("ID")).content = ds_id - new_elem.add_element("IMAGES").text = "-1" - new_elem.add_element("IMAGES_USED").text = images_used.to_s + new_elem.add_child(doc.create_element("IMAGES")).content = "-1" + new_elem.add_child(doc.create_element("IMAGES_USED")).content = images_used.to_s - new_elem.add_element("SIZE").text = "-1" - new_elem.add_element("SIZE_USED").text = size_used.to_s + new_elem.add_child(doc.create_element("SIZE")).content = "-1" + new_elem.add_child(doc.create_element("SIZE_USED")).content = size_used.to_s } end end diff --git a/src/onedb/import_slave.rb b/src/onedb/import_slave.rb index ec11ed7186..ad21d66920 100644 --- a/src/onedb/import_slave.rb +++ b/src/onedb/import_slave.rb @@ -14,8 +14,6 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # -ONE_LOCATION = ENV["ONE_LOCATION"] - if !ONE_LOCATION LOG_LOCATION = "/var/log/one" else @@ -31,9 +29,32 @@ include OpenNebula module OneDBImportSlave VERSION = "4.5.0" + LOCAL_VERSION = "4.5.0" - def db_version - VERSION + def check_db_version(master_db_version, slave_db_version) + if ( master_db_version[:version] != VERSION || + master_db_version[:local_version] != LOCAL_VERSION ) + + raise <<-EOT +Version mismatch: import slave file is for version +Shared: #{VERSION}, Local: #{LOCAL_VERSION} + +Current master database is version +Shared: #{master_db_version[:version]}, Local: #{master_db_version[:local_version]} +EOT + elsif ( slave_db_version[:version] != VERSION || + slave_db_version[:local_version] != LOCAL_VERSION ) + + raise <<-EOT +Version mismatch: import slave file is for version +Shared: #{VERSION}, Local: #{LOCAL_VERSION} + +Current slave database is version +Shared: #{slave_db_version[:version]}, Local: #{slave_db_version[:local_version]} +EOT + elsif master_db_version[:is_slave] + raise "Master database is an OpenNebula federation slave" + end end def one_version @@ -364,6 +385,28 @@ EOT end end + slave_template = slave_doc.root.at_xpath("TEMPLATE") + master_template = master_doc.root.at_xpath("TEMPLATE") + + # Avoid duplicated template attributes, removing + # them from the slave template + master_template.children.each do |e| + if slave_template.at_xpath(e.name) + slave_template.at_xpath(e.name).remove + end + end + + # Add slave template attributes to master template + master_template << slave_template.children + + # Merge resource providers + slave_doc.root.xpath("RESOURCE_PROVIDER").each do |elem| + # Zone ID must be 0, will be changed to the target ID + elem.at_xpath("ZONE_ID").content = zone_id + + master_doc.root << elem + end + + @db[:group_pool].where(:oid => new_group[:oid]).update( :body => master_doc.root.to_s) else @@ -382,6 +425,12 @@
slave_doc.root.add_child(new_elem) + # Update resource providers + slave_doc.root.xpath("RESOURCE_PROVIDER").each do |elem| + # Zone ID must be 0, will be changed to the target ID + elem.at_xpath("ZONE_ID").content = zone_id + end + @db[:group_pool].insert( :oid => new_group[:oid], :name => new_group[:name], @@ -479,6 +528,19 @@ EOT ((row[:resource] & 0xFFFFFFFF00000000) | groups[gid][:oid]) end + elsif ( (row[:resource] & Acl::RESOURCES["GROUP"]) == Acl::RESOURCES["GROUP"] && + (row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) + + gid = (row[:resource] & 0xFFFFFFFF) + + if (groups[gid].nil?) + insert = false + error_str = "Group ##{gid} does not exist" + else + new_resource = + ((row[:resource] & 0xFFFFFFFF00000000) | groups[gid][:oid]) + end + elsif ( (row[:resource] & Acl::RESOURCES["USER"]) == Acl::RESOURCES["USER"] && (row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) @@ -494,52 +556,63 @@ EOT end + if ( (row[:resource] & Acl::RESOURCES["ZONE"]) == Acl::RESOURCES["ZONE"] && + (row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) + + zid = (row[:resource] & 0xFFFFFFFF) + + if (zid != 0) + insert = false + error_str = "Zone ##{zid} is unknown for the slave" + else + new_resource = (Acl::USERS["UID"] | zone_id) + end + end + if ( (row[:zone] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) zid = (row[:zone] & 0xFFFFFFFF) if (zid != 0) insert = false + error_str = "Zone ##{zid} is unknown for the slave" else new_zone = (Acl::USERS["UID"] | zone_id) end end - if (!insert) + # Avoid duplicated ACL rules + @db.fetch("SELECT oid FROM acl WHERE "<< + "user = #{new_user} AND resource = #{new_resource} "<< + "AND rights = #{row[:rights]} AND "<< + "zone = #{new_zone}") do |acl_row| + + insert = false + error_str = "the same Rule exists with ID ##{acl_row[:oid]}" + end + + + if (insert) + last_acl_oid += 1 + + log("Slave DB ACL Rule ##{row[:oid]} imported with ID ##{last_acl_oid}") + + @db[:acl].insert( + :oid => last_acl_oid, + :user => new_user, + 
:resource => new_resource, + :rights => row[:rights], + :zone => new_zone) + else log("Slave DB ACL Rule ##{row[:oid]} will not be "<< "imported to the master DB, " << error_str) - else - # Avoid duplicated ACL rules - @db.fetch("SELECT oid FROM acl WHERE "<< - "user = #{new_user} AND resource = #{new_resource} "<< - "AND rights = #{row[:rights]} AND "<< - "zone = #{row[:zone]}") do |acl_row| - - insert = false - end - - if (insert) - last_acl_oid += 1 - - log("New ACL Rule imported with ID ##{last_acl_oid}") - - @db[:acl].insert( - :oid => last_acl_oid, - :user => new_user, - :resource => new_resource, - :rights => row[:rights], - :zone => new_zone) - end end end end ######################################################################## - # Init slave_db_versioning table + # Cleanup shared tables form slave DB ######################################################################## - @slave_db.run "CREATE TABLE slave_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256));" - @slave_db.run "INSERT INTO slave_db_versioning (oid, version, timestamp, comment) VALUES (0, '#{VERSION}', #{Time.now.to_i}, 'onedb import tool');" - @slave_db.run "DROP TABLE old_document_pool;" @slave_db.run "DROP TABLE old_image_pool;" @slave_db.run "DROP TABLE old_network_pool;" diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index faf6dd3f4b..6403c1d189 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -16,6 +16,9 @@ require 'onedb_backend' +# If set to true, extra verbose time log will be printed for each migrator +LOG_TIME = false + class OneDB def initialize(ops) if ops[:backend] == :sqlite @@ -91,16 +94,32 @@ class OneDB end def version(ops) - version, timestamp, comment = @backend.read_db_version + ret = @backend.read_db_version if(ops[:verbose]) - puts "Version: #{version}" + puts "Shared tables version: #{ret[:version]}" - time = version == "2.0" ? Time.now : Time.at(timestamp) + time = ret[:version] == "2.0" ? 
Time.now : Time.at(ret[:timestamp]) puts "Timestamp: #{time.strftime("%m/%d %H:%M:%S")}" - puts "Comment: #{comment}" + puts "Comment: #{ret[:comment]}" + + if ret[:local_version] + puts + puts "Local tables version: #{ret[:local_version]}" + + time = Time.at(ret[:local_timestamp]) + puts "Timestamp: #{time.strftime("%m/%d %H:%M:%S")}" + puts "Comment: #{ret[:local_comment]}" + + if ret[:is_slave] + puts + puts "This database is a federation slave" + end + end + else - puts version + puts "Shared: #{ret[:version]}" + puts "Local: #{ret[:local_version]}" end return 0 @@ -114,58 +133,58 @@ class OneDB # max_version is ignored for now, as this is the first onedb release. # May be used in next releases def upgrade(max_version, ops) - version, timestamp, comment = @backend.read_db_version + db_version = @backend.read_db_version if ops[:verbose] - puts "Version read:" - puts "#{version} : #{comment}" + pretty_print_db_version(db_version) + puts "" end - matches = Dir.glob("#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb") - - if ( matches.size > 0 ) - # At least one upgrade will be executed, make DB backup - backup(ops[:backup], ops) - end + backup(ops[:backup], ops) begin - result = nil - i = 0 + timea = Time.now - while ( matches.size > 0 ) - if ( matches.size > 1 ) - raise "There are more than one file that match \ - \"#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb\"" + # Upgrade shared (federation) tables, only for standalone and master + if !db_version[:is_slave] + puts + puts ">>> Running migrators for shared tables" + + dir_prefix = "#{RUBY_LIB_LOCATION}/onedb/shared" + + result = apply_migrators(dir_prefix, db_version[:version], ops) + + # Modify db_versioning table + if result != nil + @backend.update_db_version(db_version[:version]) + else + puts "Database already uses version #{db_version[:version]}" end - - file = matches[0] - - puts " > Running migrator #{file}" if ops[:verbose] - - load(file) - @backend.extend Migrator - result = @backend.up - - if !result - raise 
"Error while upgrading from #{version} to " << - " #{@backend.db_version}" - end - - puts " > Done" if ops[:verbose] - puts "" if ops[:verbose] - - matches = Dir.glob( - "#{RUBY_LIB_LOCATION}/onedb/#{@backend.db_version}_to_*.rb") end + db_version = @backend.read_db_version + + # Upgrade local tables, for standalone, master, and slave + + puts + puts ">>> Running migrators for local tables" + + dir_prefix = "#{RUBY_LIB_LOCATION}/onedb/local" + + result = apply_migrators(dir_prefix, db_version[:local_version], ops) + # Modify db_versioning table if result != nil - @backend.update_db_version(version) + @backend.update_local_db_version(db_version[:local_version]) else - puts "Database already uses version #{version}" + puts "Database already uses version #{db_version[:local_version]}" end + timeb = Time.now + + puts "Total time: #{"%0.02f" % (timeb - timea).to_s}s" if ops[:verbose] + return 0 rescue Exception => e @@ -182,12 +201,50 @@ class OneDB end end + def apply_migrators(prefix, db_version, ops) + result = nil + i = 0 + + matches = Dir.glob("#{prefix}/#{db_version}_to_*.rb") + + while ( matches.size > 0 ) + if ( matches.size > 1 ) + raise "There are more than one file that match \ + \"#{prefix}/#{db_version}_to_*.rb\"" + end + + file = matches[0] + + puts " > Running migrator #{file}" if ops[:verbose] + + time0 = Time.now + + load(file) + @backend.extend Migrator + result = @backend.up + + time1 = Time.now + + if !result + raise "Error while upgrading from #{db_version} to " << + " #{@backend.db_version}" + end + + puts " > Done in #{"%0.02f" % (time1 - time0).to_s}s" if ops[:verbose] + puts "" if ops[:verbose] + + matches = Dir.glob( + "#{prefix}/#{@backend.db_version}_to_*.rb") + end + + return result + end + def fsck(ops) - version, timestamp, comment = @backend.read_db_version + ret = @backend.read_db_version if ops[:verbose] - puts "Version read:" - puts "#{version} : #{comment}" + pretty_print_db_version(ret) puts "" end @@ -200,10 +257,7 @@ class OneDB 
load(file) @backend.extend OneDBFsck - if ( version != @backend.db_version ) - raise "Version mismatch: fsck file is for version "<< - "#{@backend.db_version}, current database version is #{version}" - end + @backend.check_db_version() # FSCK will be executed, make DB backup backup(ops[:backup], ops) @@ -211,20 +265,28 @@ class OneDB begin puts " > Running fsck" if ops[:verbose] + time0 = Time.now + result = @backend.fsck if !result - raise "Error running fsck version #{version}" + raise "Error running fsck version #{ret[:version]}" end puts " > Done" if ops[:verbose] puts "" if ops[:verbose] + time1 = Time.now + + if LOG_TIME + puts " > Total time: #{time1 - time0}s" if ops[:verbose] + end + return 0 rescue Exception => e puts e.message - puts "Error running fsck version #{version}" + puts "Error running fsck version #{ret[:version]}" puts "The database will be restored" ops[:force] = true @@ -256,17 +318,17 @@ class OneDB :db_name => ops[:slave_db_name] ) - version, timestamp, comment = @backend.read_db_version + db_version = @backend.read_db_version - slave_version, slave_timestamp, slave_comment = - slave_backend.read_db_version + slave_db_version = slave_backend.read_db_version if ops[:verbose] - puts "Master version read:" - puts "#{version} : #{comment}" + puts "Master database information:" + pretty_print_db_version(db_version) puts "" - puts "Slave version read:" - puts "#{slave_version} : #{slave_comment}" + puts "" + puts "Slave database information:" + pretty_print_db_version(slave_db_version) puts "" end @@ -279,19 +341,7 @@ class OneDB load(file) @backend.extend OneDBImportSlave - if ( version != @backend.db_version ) - raise "Version mismatch: import slave file is for version "<< - "#{@backend.db_version}, current master database version is #{version}" - end - - if ( slave_version != @backend.db_version ) - raise "Version mismatch: import slave file is for version "<< - "#{@backend.db_version}, current slave database version is #{version}" - end - - 
# Import will be executed, make DB backup - backup(ops[:backup], ops) - backup(ops[:"slave-backup"], ops, slave_backend) + @backend.check_db_version(db_version, slave_db_version) puts <<-EOT Before running this tool, it is required to create a new Zone in the @@ -343,6 +393,10 @@ is preserved. merge_groups = input == "Y" + # Import will be executed, make DB backup + backup(ops[:backup], ops) + backup(ops[:"slave-backup"], ops, slave_backend) + begin puts " > Running slave import" if ops[:verbose] @@ -350,7 +404,7 @@ is preserved. merge_groups, zone_id) if !result - raise "Error running slave import version #{version}" + raise "Error running slave import" end puts " > Done" if ops[:verbose] @@ -360,7 +414,7 @@ is preserved. rescue Exception => e puts e.message - puts "Error running slave import version #{version}" + puts "Error running slave import" puts "The databases will be restored" ops[:force] = true @@ -383,4 +437,18 @@ is preserved. raise "First stop OpenNebula. Lock file found: #{LOCK_FILE}" end end + + def pretty_print_db_version(db_version) + puts "Version read:" + puts "Shared tables #{db_version[:version]} : #{db_version[:comment]}" + + if db_version[:local_version] + puts "Local tables #{db_version[:local_version]} : #{db_version[:local_comment]}" + end + + if db_version[:is_slave] + puts + puts "This database is a federation slave" + end + end end diff --git a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index 3832bc20ab..653cfa4e14 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -28,19 +28,41 @@ class OneDBBacKEnd def read_db_version connect_db + ret = {} + begin - version = "2.0" - timestamp = 0 - comment = "" + ret[:version] = "2.0" + ret[:timestamp] = 0 + ret[:comment] = "" @db.fetch("SELECT version, timestamp, comment FROM db_versioning " + "WHERE oid=(SELECT MAX(oid) FROM db_versioning)") do |row| - version = row[:version] - timestamp = row[:timestamp] - comment = row[:comment] + ret[:version] = 
row[:version] + ret[:timestamp] = row[:timestamp] + ret[:comment] = row[:comment] end - return [version, timestamp, comment] + ret[:local_version] = ret[:version] + ret[:local_timestamp] = ret[:timestamp] + ret[:local_comment] = ret[:comment] + ret[:is_slave] = false + + begin + @db.fetch("SELECT version, timestamp, comment, is_slave FROM "+ + "local_db_versioning WHERE oid=(SELECT MAX(oid) "+ + "FROM local_db_versioning)") do |row| + ret[:local_version] = row[:version] + ret[:local_timestamp] = row[:timestamp] + ret[:local_comment] = row[:comment] + ret[:is_slave] = row[:is_slave] + end + rescue Exception => e + if e.class == Sequel::DatabaseConnectionError + raise e + end + end + + return ret rescue Exception => e if e.class == Sequel::DatabaseConnectionError @@ -62,7 +84,7 @@ class OneDBBacKEnd comment = "Could not read any previous db_versioning data, " << "assuming it is an OpenNebula 2.0 or 2.2 DB." - return [version, timestamp, comment] + return ret end end @@ -110,6 +132,37 @@ class OneDBBacKEnd puts comment end + def update_local_db_version(version) + comment = "Database migrated from #{version} to #{db_version}"+ + " (#{one_version}) by onedb command." + + max_oid = nil + @db.fetch("SELECT MAX(oid) FROM local_db_versioning") do |row| + max_oid = row[:"MAX(oid)"].to_i + end + + max_oid = 0 if max_oid.nil? 
+ + is_slave = 0 + + @db.fetch("SELECT is_slave FROM local_db_versioning "<< + "WHERE oid=#{max_oid}") do |row| + is_slave = row[:is_slave] + end + + @db.run( + "INSERT INTO local_db_versioning (oid, version, timestamp, comment, is_slave) "<< + "VALUES (" << + "#{max_oid+1}, " << + "'#{db_version}', " << + "#{Time.new.to_i}, " << + "'#{comment}'," << + "#{is_slave})" + ) + + puts comment + end + def db() return @db end @@ -129,6 +182,20 @@ class OneDBBacKEnd return found end + + def init_log_time() + @block_n = 0 + @time0 = Time.now + end + + def log_time() + if LOG_TIME + @time1 = Time.now + puts " > #{db_version} Time for block #{@block_n}: #{"%0.02f" % (@time1 - @time0).to_s}s" + @time0 = Time.now + @block_n += 1 + end + end end class BackEndMySQL < OneDBBacKEnd @@ -166,7 +233,7 @@ class BackEndMySQL < OneDBBacKEnd end def backup(bck_file) - cmd = "mysqldump -u #{@user} -p#{@passwd} -h #{@server} " + + cmd = "mysqldump -u #{@user} -p'#{@passwd}' -h #{@server} " + "-P #{@port} #{@db_name} > #{bck_file}" rc = system(cmd) @@ -188,7 +255,7 @@ class BackEndMySQL < OneDBBacKEnd " use -f to overwrite." 
end - mysql_cmd = "mysql -u #{@user} -p#{@passwd} -h #{@server} -P #{@port} " + mysql_cmd = "mysql -u #{@user} -p'#{@passwd}' -h #{@server} -P #{@port} " drop_cmd = mysql_cmd + "-e 'DROP DATABASE IF EXISTS #{@db_name};'" rc = system(drop_cmd) diff --git a/src/onedb/2.0_to_2.9.80.rb b/src/onedb/shared/2.0_to_2.9.80.rb similarity index 100% rename from src/onedb/2.0_to_2.9.80.rb rename to src/onedb/shared/2.0_to_2.9.80.rb diff --git a/src/onedb/2.9.80_to_2.9.85.rb b/src/onedb/shared/2.9.80_to_2.9.85.rb similarity index 100% rename from src/onedb/2.9.80_to_2.9.85.rb rename to src/onedb/shared/2.9.80_to_2.9.85.rb diff --git a/src/onedb/2.9.85_to_2.9.90.rb b/src/onedb/shared/2.9.85_to_2.9.90.rb similarity index 100% rename from src/onedb/2.9.85_to_2.9.90.rb rename to src/onedb/shared/2.9.85_to_2.9.90.rb diff --git a/src/onedb/2.9.90_to_3.0.0.rb b/src/onedb/shared/2.9.90_to_3.0.0.rb similarity index 100% rename from src/onedb/2.9.90_to_3.0.0.rb rename to src/onedb/shared/2.9.90_to_3.0.0.rb diff --git a/src/onedb/3.0.0_to_3.1.0.rb b/src/onedb/shared/3.0.0_to_3.1.0.rb similarity index 100% rename from src/onedb/3.0.0_to_3.1.0.rb rename to src/onedb/shared/3.0.0_to_3.1.0.rb diff --git a/src/onedb/3.1.0_to_3.1.80.rb b/src/onedb/shared/3.1.0_to_3.1.80.rb similarity index 100% rename from src/onedb/3.1.0_to_3.1.80.rb rename to src/onedb/shared/3.1.0_to_3.1.80.rb diff --git a/src/onedb/3.1.80_to_3.2.0.rb b/src/onedb/shared/3.1.80_to_3.2.0.rb similarity index 100% rename from src/onedb/3.1.80_to_3.2.0.rb rename to src/onedb/shared/3.1.80_to_3.2.0.rb diff --git a/src/onedb/3.2.0_to_3.2.1.rb b/src/onedb/shared/3.2.0_to_3.2.1.rb similarity index 100% rename from src/onedb/3.2.0_to_3.2.1.rb rename to src/onedb/shared/3.2.0_to_3.2.1.rb diff --git a/src/onedb/3.2.1_to_3.3.0.rb b/src/onedb/shared/3.2.1_to_3.3.0.rb similarity index 100% rename from src/onedb/3.2.1_to_3.3.0.rb rename to src/onedb/shared/3.2.1_to_3.3.0.rb diff --git a/src/onedb/3.3.0_to_3.3.80.rb 
b/src/onedb/shared/3.3.0_to_3.3.80.rb similarity index 100% rename from src/onedb/3.3.0_to_3.3.80.rb rename to src/onedb/shared/3.3.0_to_3.3.80.rb diff --git a/src/onedb/3.3.80_to_3.4.0.rb b/src/onedb/shared/3.3.80_to_3.4.0.rb similarity index 100% rename from src/onedb/3.3.80_to_3.4.0.rb rename to src/onedb/shared/3.3.80_to_3.4.0.rb diff --git a/src/onedb/3.4.0_to_3.4.1.rb b/src/onedb/shared/3.4.0_to_3.4.1.rb similarity index 100% rename from src/onedb/3.4.0_to_3.4.1.rb rename to src/onedb/shared/3.4.0_to_3.4.1.rb diff --git a/src/onedb/3.4.1_to_3.5.80.rb b/src/onedb/shared/3.4.1_to_3.5.80.rb similarity index 100% rename from src/onedb/3.4.1_to_3.5.80.rb rename to src/onedb/shared/3.4.1_to_3.5.80.rb diff --git a/src/onedb/3.5.80_to_3.6.0.rb b/src/onedb/shared/3.5.80_to_3.6.0.rb similarity index 100% rename from src/onedb/3.5.80_to_3.6.0.rb rename to src/onedb/shared/3.5.80_to_3.6.0.rb diff --git a/src/onedb/3.6.0_to_3.7.80.rb b/src/onedb/shared/3.6.0_to_3.7.80.rb similarity index 100% rename from src/onedb/3.6.0_to_3.7.80.rb rename to src/onedb/shared/3.6.0_to_3.7.80.rb diff --git a/src/onedb/3.7.80_to_3.8.0.rb b/src/onedb/shared/3.7.80_to_3.8.0.rb similarity index 100% rename from src/onedb/3.7.80_to_3.8.0.rb rename to src/onedb/shared/3.7.80_to_3.8.0.rb diff --git a/src/onedb/3.8.0_to_3.8.1.rb b/src/onedb/shared/3.8.0_to_3.8.1.rb similarity index 68% rename from src/onedb/3.8.0_to_3.8.1.rb rename to src/onedb/shared/3.8.0_to_3.8.1.rb index 293c537a60..371b47559d 100644 --- a/src/onedb/3.8.0_to_3.8.1.rb +++ b/src/onedb/shared/3.8.0_to_3.8.1.rb @@ -15,6 +15,7 @@ #--------------------------------------------------------------------------- # require 'set' +require 'nokogiri' require "rexml/document" include REXML @@ -29,6 +30,8 @@ module Migrator def up + init_log_time() + ######################################################################## # Bug : Add VM IDs Collection to Hosts & Images ######################################################################## @@ 
-49,17 +52,17 @@ module Migrator counters[:image][row[:oid]] = Set.new end + log_time() # Aggregate information of the RUNNING vms @db.fetch("SELECT oid,body FROM vm_pool WHERE state<>6") do |row| - vm_doc = Document.new(row[:body]) - - state = vm_doc.root.get_text('STATE').to_s.to_i - lcm_state = vm_doc.root.get_text('LCM_STATE').to_s.to_i + vm_doc = Nokogiri::XML(row[:body]) + state = vm_doc.root.at_xpath('STATE').text.to_i + lcm_state = vm_doc.root.at_xpath('LCM_STATE').text.to_i # Images used by this VM - vm_doc.root.each_element("TEMPLATE/DISK/IMAGE_ID") do |e| + vm_doc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each do |e| img_id = e.text.to_i if counters[:image][img_id].nil? @@ -78,7 +81,7 @@ module Migrator # Get hostid hid = -1 - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HID") { |e| + vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HID").each { |e| hid = e.text.to_i } @@ -89,6 +92,8 @@ module Migrator end end + log_time() + ######################################################################## # Hosts # @@ -103,38 +108,41 @@ module Migrator "UNIQUE(name));" # Calculate the host's xml and write them to host_pool_new - @db[:host_pool].each do |row| - host_doc = Document.new(row[:body]) + @db.transaction do + @db[:host_pool].each do |row| + host_doc = Document.new(row[:body]) - hid = row[:oid] + hid = row[:oid] - rvms = counters[:host][hid][:rvms].size + rvms = counters[:host][hid][:rvms].size - # rewrite running_vms - host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| - if e.text != rvms.to_s - warn("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms + # rewrite running_vms + host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| + if e.text != rvms.to_s + warn("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } + + # re-do list of VM IDs + vms_new_elem = host_doc.root.add_element("VMS") + + counters[:host][hid][:rvms].each do |id| + vms_new_elem.add_element("ID").text = id.to_s end - } 
- # re-do list of VM IDs - vms_new_elem = host_doc.root.add_element("VMS") + row[:body] = host_doc.to_s - counters[:host][hid][:rvms].each do |id| - vms_new_elem.add_element("ID").text = id.to_s + # commit + @db[:host_pool_new].insert(row) end - - row[:body] = host_doc.to_s - - # commit - @db[:host_pool_new].insert(row) end # Rename table @db.run("DROP TABLE host_pool") @db.run("ALTER TABLE host_pool_new RENAME TO host_pool") + log_time() ######################################################################## # Image @@ -146,39 +154,42 @@ module Migrator # Create a new empty table where we will store the new calculated values @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" - # Calculate the host's xml and write them to host_pool_new - @db[:image_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:image_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - rvms = counters[:image][oid].size + rvms = counters[:image][oid].size - # rewrite running_vms - doc.root.each_element("RUNNING_VMS") {|e| - if e.text != rvms.to_s - warn("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms + # rewrite running_vms + doc.root.each_element("RUNNING_VMS") {|e| + if e.text != rvms.to_s + warn("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } + + # re-do list of VM IDs + vms_new_elem = doc.root.add_element("VMS") + + counters[:image][oid].each do |id| + vms_new_elem.add_element("ID").text = id.to_s end - } - # re-do list of VM IDs - vms_new_elem = doc.root.add_element("VMS") + row[:body] = doc.to_s - counters[:image][oid].each do |id| - vms_new_elem.add_element("ID").text = id.to_s + # commit + @db[:image_pool_new].insert(row) end - - row[:body] = doc.to_s - - # commit - @db[:image_pool_new].insert(row) end # Rename table 
@db.run("DROP TABLE image_pool") @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") + log_time() + return true end end diff --git a/src/onedb/3.8.1_to_3.8.2.rb b/src/onedb/shared/3.8.1_to_3.8.2.rb similarity index 100% rename from src/onedb/3.8.1_to_3.8.2.rb rename to src/onedb/shared/3.8.1_to_3.8.2.rb diff --git a/src/onedb/3.8.2_to_3.8.3.rb b/src/onedb/shared/3.8.2_to_3.8.3.rb similarity index 100% rename from src/onedb/3.8.2_to_3.8.3.rb rename to src/onedb/shared/3.8.2_to_3.8.3.rb diff --git a/src/onedb/3.8.3_to_3.8.4.rb b/src/onedb/shared/3.8.3_to_3.8.4.rb similarity index 100% rename from src/onedb/3.8.3_to_3.8.4.rb rename to src/onedb/shared/3.8.3_to_3.8.4.rb diff --git a/src/onedb/3.8.4_to_3.8.5.rb b/src/onedb/shared/3.8.4_to_3.8.5.rb similarity index 100% rename from src/onedb/3.8.4_to_3.8.5.rb rename to src/onedb/shared/3.8.4_to_3.8.5.rb diff --git a/src/onedb/shared/3.8.5_to_3.9.80.rb b/src/onedb/shared/3.8.5_to_3.9.80.rb new file mode 100644 index 0000000000..6cb2b5ca8b --- /dev/null +++ b/src/onedb/shared/3.8.5_to_3.9.80.rb @@ -0,0 +1,654 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +require 'set' +require "rexml/document" +include REXML + +require 'nokogiri' + +class String + def red + colorize(31) + end + +private + + def colorize(color_code) + "\e[#{color_code}m#{self}\e[0m" + end +end + +module Migrator + def db_version + "3.9.80" + end + + def one_version + "OpenNebula 3.9.80" + end + + def up + + init_log_time() + + ######################################################################## + # Add Cloning Image ID collection to Images + ######################################################################## + + counters = {} + counters[:image] = {} + + # Init image counters + @db.fetch("SELECT oid,body FROM image_pool") do |row| + if counters[:image][row[:oid]].nil? + counters[:image][row[:oid]] = { + :clones => Set.new + } + end + + doc = Document.new(row[:body]) + + doc.root.each_element("CLONING_ID") do |e| + img_id = e.text.to_i + + if counters[:image][img_id].nil? + counters[:image][img_id] = { + :clones => Set.new + } + end + + counters[:image][img_id][:clones].add(row[:oid]) + end + end + + log_time() + + ######################################################################## + # Image + # + # IMAGE/CLONING_OPS + # IMAGE/CLONES/ID + ######################################################################## + + @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" + + @db.transaction do + @db[:image_pool].each do |row| + doc = Document.new(row[:body]) + + oid = row[:oid] + + n_cloning_ops = counters[:image][oid][:clones].size + + # Rewrite number of clones + doc.root.each_element("CLONING_OPS") { |e| + if e.text != n_cloning_ops.to_s + warn("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") + e.text = n_cloning_ops + end + } + + # re-do list of Images cloning this one + clones_new_elem = 
doc.root.add_element("CLONES") + + counters[:image][oid][:clones].each do |id| + clones_new_elem.add_element("ID").text = id.to_s + end + + row[:body] = doc.to_s + + # commit + @db[:image_pool_new].insert(row) + end + end + + # Rename table + @db.run("DROP TABLE image_pool") + @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") + + log_time() + + ######################################################################## + # Feature #1565: New cid column in host, ds and vnet tables + ######################################################################## + + @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" + @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" + + @db.transaction do + @db.fetch("SELECT * FROM old_host_pool") do |row| + doc = Document.new(row[:body]) + + cluster_id = doc.root.get_text('CLUSTER_ID').to_s + + @db[:host_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :state => row[:state], + :last_mon_time => row[:last_mon_time], + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => cluster_id) + end + end + + @db.run "DROP TABLE old_host_pool;" + + log_time() + + ######################################################################## + # Feature #1565: New cid column + # Feature #471: IPv6 addresses + ######################################################################## + + @db.run "ALTER TABLE network_pool RENAME TO old_network_pool;" + @db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));" + + @db.transaction do + @db.fetch("SELECT * FROM old_network_pool") do |row| + doc = 
Document.new(row[:body]) + + cluster_id = doc.root.get_text('CLUSTER_ID').to_s + + doc.root.add_element("GLOBAL_PREFIX") + doc.root.add_element("SITE_PREFIX") + + @db[:network_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => cluster_id) + end + end + + @db.run "DROP TABLE old_network_pool;" + + log_time() + + ######################################################################## + # Feature #1617 + # New datastore, 2 "files" + # DATASTORE/SYSTEM is now DATASTORE/TYPE + # + # Feature #1565: New cid column in host, ds and vnet tables + ######################################################################## + + @db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;" + @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" + + @db.transaction do + @db.fetch("SELECT * FROM old_datastore_pool") do |row| + doc = Document.new(row[:body]) + + type = "0" # IMAGE_DS + + system_elem = doc.root.delete_element("SYSTEM") + + if ( !system_elem.nil? && system_elem.text == "1" ) + type = "1" # SYSTEM_DS + end + + doc.root.add_element("TYPE").text = type + + doc.root.each_element("TEMPLATE") do |e| + e.delete_element("SYSTEM") + e.add_element("TYPE").text = type == "0" ? 
"IMAGE_DS" : "SYSTEM_DS" + end + + cluster_id = doc.root.get_text('CLUSTER_ID').to_s + + @db[:datastore_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => cluster_id) + end + end + + @db.run "DROP TABLE old_datastore_pool;" + + log_time() + + user_0_name = "oneadmin" + + @db.fetch("SELECT name FROM user_pool WHERE oid=0") do |row| + user_0_name = row[:name] + end + + group_0_name = "oneadmin" + + @db.fetch("SELECT name FROM group_pool WHERE oid=0") do |row| + group_0_name = row[:name] + end + + base_path = "/var/lib/one/datastores/2" + + @db.fetch("SELECT body FROM datastore_pool WHERE oid=0") do |row| + doc = Document.new(row[:body]) + + doc.root.each_element("BASE_PATH") do |e| + base_path = e.text + base_path[-1] = "2" + end + end + + @db.run "INSERT INTO datastore_pool VALUES(2,'files','200#{user_0_name}#{group_0_name}files110100100fsssh#{base_path}20-1',0,0,1,1,1,-1);" + + log_time() + + ######################################################################## + # Feature #1611: Default quotas + ######################################################################## + + @db.run("CREATE TABLE IF NOT EXISTS system_attributes (name VARCHAR(128) PRIMARY KEY, body MEDIUMTEXT)") + @db.run("INSERT INTO system_attributes VALUES('DEFAULT_GROUP_QUOTAS','');") + @db.run("INSERT INTO system_attributes VALUES('DEFAULT_USER_QUOTAS','');") + + + @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" + @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => 
row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + + @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) + + set_default_quotas(doc) + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_user_pool;" + + log_time() + + @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" + @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.transaction do + # oneadmin group does not have quotas + @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + + @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) + + set_default_quotas(doc) + + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_group_pool;" + + log_time() + + ######################################################################## + # Bug #1694: SYSTEM_DS is now set with the method adddatastore + ######################################################################## + + @db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;" + @db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u 
INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.transaction do + @db.fetch("SELECT * FROM old_cluster_pool") do |row| + doc = Document.new(row[:body]) + + system_ds = 0 + + doc.root.each_element("TEMPLATE") do |e| + elem = e.delete_element("SYSTEM_DS") + + if !elem.nil? + system_ds = elem.text.to_i + end + end + + if system_ds != 0 + updated_body = nil + + @db.fetch("SELECT body FROM datastore_pool WHERE oid=#{system_ds}") do |ds_row| + ds_doc = Document.new(ds_row[:body]) + + type = "0" # IMAGE_DS + + ds_doc.root.each_element("TYPE") do |e| + type = e.text + end + + if type != "1" + puts " > Cluster #{row[:oid]} has the "<< + "System Datastore set to Datastore #{system_ds}, "<< + "but its type is not SYSTEM_DS. The System Datastore "<< + "for this Cluster will be set to 0" + + system_ds = 0 + else + cluster_id = "-1" + + ds_doc.root.each_element("CLUSTER_ID") do |e| + cluster_id = e.text + end + + if row[:oid] != cluster_id.to_i + puts " > Cluster #{row[:oid]} has the "<< + "System Datastore set to Datastore #{system_ds}, "<< + "but it is not part of the Cluster. It will be added now." + + ds_doc.root.each_element("CLUSTER_ID") do |e| + e.text = row[:oid] + end + + ds_doc.root.each_element("CLUSTER") do |e| + e.text = row[:name] + end + + updated_body = ds_doc.root.to_s + end + end + end + + if !updated_body.nil? 
+ @db[:datastore_pool].where(:oid => system_ds).update( + :body => updated_body) + end + end + + doc.root.add_element("SYSTEM_DS").text = system_ds.to_s + + @db[:cluster_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_cluster_pool;" + + log_time() + + ######################################################################## + # Feature #1556: New elem USER_TEMPLATE + # + # Feature #1483: Move scheduling attributes + # /VM/TEMPLATE/REQUIREMENTS -> USER_TEMPLATE/SCHED_REQUIREMENTS + # /VM/TEMPLATE/RANK -> USER_TEMPLATE/SCHED_RANK + ######################################################################## + + @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" + @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" + + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + + doc = Nokogiri::XML(row[:body]) + user_template = doc.create_element("USER_TEMPLATE") + + e = doc.root.at_xpath("TEMPLATE") + elem = e.at_xpath("REQUIREMENTS") + + if !elem.nil? + new_elem = doc.create_element("SCHED_REQUIREMENTS") + new_elem.content = elem.text + elem.remove + + user_template.add_child(new_elem) + end + + elem = e.at_xpath("RANK") + + if !elem.nil? 
+ new_elem = doc.create_element("SCHED_RANK") + new_elem.content = elem.text + elem.remove + + user_template.add_child(new_elem) + end + + doc.root << user_template + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_vm_pool;" + + log_time() + + ######################################################################## + # Feature #1483: Move scheduling attributes + # /VMTEMPLATE/TEMPLATE/REQUIREMENTS -> /VMTEMPLATE/TEMPLATE/SCHED_REQUIREMENTS + # /VMTEMPLATE/TEMPLATE/RANK -> /VMTEMPLATE/TEMPLATE/SCHED_RANK + ######################################################################## + + @db.run "ALTER TABLE template_pool RENAME TO old_template_pool;" + @db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" + + @db.transaction do + @db.fetch("SELECT * FROM old_template_pool") do |row| + + doc = Nokogiri::XML(row[:body]) + + template = doc.root.at_xpath("TEMPLATE") + + elem = template.at_xpath("REQUIREMENTS") + + if !elem.nil? + new_elem = doc.create_element("SCHED_REQUIREMENTS") + new_elem.content = elem.text + elem.remove + + template.add_child(new_elem) + end + + elem = template.at_xpath("RANK") + + if !elem.nil? 
+ new_elem = doc.create_element("SCHED_RANK") + new_elem.content = elem.text + elem.remove + + template.add_child(new_elem) + end + + @db[:template_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_template_pool;" + + log_time() + + ######################################################################## + # Feature #1691 Add new attribute NIC/NIC_ID + ######################################################################## + + @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" + @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" + + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + if ( row[:state] != 6 ) # DONE + doc = Nokogiri::XML(row[:body]) + + nic_id = 0 + + doc.root.xpath("TEMPLATE/NIC").each { |e| + e.xpath("NIC_ID").each {|n| n.remove} + e.add_child(doc.create_element("NIC_ID")).content = + (nic_id).to_s + + nic_id += 1 + } + + row[:body] = doc.root.to_s + end + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_vm_pool;" + + log_time() + + ######################################################################## + # + # Banner for the new /var/lib/one/vms directory + # + ######################################################################## + + puts + puts "ATTENTION: manual intervention required".red + puts <<-END.gsub(/^ {8}/, '') + Virtual Machine deployment files 
have been moved from /var/lib/one to + /var/lib/one/vms. You need to move these files manually: + + $ mv /var/lib/one/[0-9]* /var/lib/one/vms + + END + + return true + end + + + def set_default_quotas(doc) + + # VM quotas + + doc.root.xpath("VM_QUOTA/VM/CPU").each do |e| + e.content = "-1" if e.text.to_f == 0 + end + + doc.root.xpath("VM_QUOTA/VM/MEMORY").each do |e| + e.content = "-1" if e.text.to_i == 0 + end + + doc.root.xpath("VM_QUOTA/VM/VMS").each do |e| + e.content = "-1" if e.text.to_i == 0 + end + + # VNet quotas + + doc.root.xpath("NETWORK_QUOTA/NETWORK/LEASES").each do |e| + e.content = "-1" if e.text.to_i == 0 + end + + # Image quotas + + doc.root.xpath("IMAGE_QUOTA/IMAGE/RVMS").each do |e| + e.content = "-1" if e.text.to_i == 0 + end + + # Datastore quotas + + doc.root.xpath("DATASTORE_QUOTA/DATASTORE/IMAGES").each do |e| + e.content = "-1" if e.text.to_i == 0 + end + + doc.root.xpath("DATASTORE_QUOTA/DATASTORE/SIZE").each do |e| + e.content = "-1" if e.text.to_i == 0 + end + end +end diff --git a/src/onedb/3.9.80_to_3.9.90.rb b/src/onedb/shared/3.9.80_to_3.9.90.rb similarity index 69% rename from src/onedb/3.9.80_to_3.9.90.rb rename to src/onedb/shared/3.9.80_to_3.9.90.rb index 2c11a48328..e51f471b7a 100644 --- a/src/onedb/3.9.80_to_3.9.90.rb +++ b/src/onedb/shared/3.9.80_to_3.9.90.rb @@ -14,8 +14,7 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # -require "rexml/document" -include REXML +require "nokogiri" class String def red @@ -39,6 +38,7 @@ module Migrator end def up + init_log_time() ######################################################################## # Feature #1631: Add ACTION to history entries @@ -47,49 +47,57 @@ module Migrator @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - @db.fetch("SELECT * FROM old_vm_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("HISTORY_RECORDS/HISTORY") do |e| - update_history(e) + doc.root.xpath("HISTORY_RECORDS/HISTORY").each do |e| + update_history(e) + end + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_vm_pool;" + log_time() + @db.run "ALTER TABLE history RENAME TO old_history;" @db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));" - @db.fetch("SELECT * FROM old_history") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT 
* FROM old_history") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("/HISTORY") do |e| - update_history(e) + doc.root.xpath("/HISTORY").each do |e| + update_history(e) + end + + @db[:history].insert( + :vid => row[:vid], + :seq => row[:seq], + :body => doc.root.to_s, + :stime => row[:stime], + :etime => row[:etime]) end - - @db[:history].insert( - :vid => row[:vid], - :seq => row[:seq], - :body => doc.root.to_s, - :stime => row[:stime], - :etime => row[:etime]) end @db.run "DROP TABLE old_history;" + log_time() + ######################################################################## # Banner for drivers renamed ######################################################################## @@ -135,16 +143,18 @@ module Migrator end def update_history(history_elem) - history_elem.add_element("ACTION").text = "0" # NONE_ACTION + # NONE_ACTION + history_elem.add_child( + history_elem.document.create_element("ACTION")).content = "0" # History reason enum has changed from # NONE, ERROR, STOP_RESUME, USER, CANCEL to # NONE, ERROR, USER - history_elem.each_element("REASON") do |reason_e| + history_elem.xpath("REASON").each do |reason_e| reason = reason_e.text.to_i - if reason > 1 # STOP_RESUME, USER, CANCEL - reason_e.text = "2" # USER + if reason > 1 # STOP_RESUME, USER, CANCEL + reason_e.content = "2" # USER end end end diff --git a/src/onedb/3.9.90_to_4.0.0.rb b/src/onedb/shared/3.9.90_to_4.0.0.rb similarity index 100% rename from src/onedb/3.9.90_to_4.0.0.rb rename to src/onedb/shared/3.9.90_to_4.0.0.rb diff --git a/src/onedb/4.0.0_to_4.0.1.rb b/src/onedb/shared/4.0.0_to_4.0.1.rb similarity index 100% rename from src/onedb/4.0.0_to_4.0.1.rb rename to src/onedb/shared/4.0.0_to_4.0.1.rb diff --git a/src/onedb/4.0.1_to_4.1.80.rb b/src/onedb/shared/4.0.1_to_4.1.80.rb similarity index 62% rename from src/onedb/4.0.1_to_4.1.80.rb rename to src/onedb/shared/4.0.1_to_4.1.80.rb index f44fe554cf..965d93ef92 100644 --- a/src/onedb/4.0.1_to_4.1.80.rb +++ 
b/src/onedb/shared/4.0.1_to_4.1.80.rb @@ -15,9 +15,9 @@ #--------------------------------------------------------------------------- # require 'fileutils' -require 'rexml/document' require 'openssl' +require "nokogiri" module Migrator def db_version @@ -42,30 +42,35 @@ module Migrator puts "Please copy the files manually." end + init_log_time() + @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_user_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_user_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("TEMPLATE") do |e| - e.add_element("TOKEN_PASSWORD").text = - OpenSSL::Digest::SHA1.hexdigest( rand().to_s ) + doc.root.at_xpath("TEMPLATE") + .add_child(doc.create_element("TOKEN_PASSWORD")) + .content = OpenSSL::Digest::SHA1.hexdigest( rand().to_s ) + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_user_pool;" + log_time() + ######################################################################## # Feature #1613 ######################################################################## @@ -73,27 +78,31 @@ module Migrator @db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;" @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u 
INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_datastore_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_datastore_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.add_element("TOTAL_MB").text = "0" - doc.root.add_element("FREE_MB").text = "0" - doc.root.add_element("USED_MB").text = "0" + doc.root.add_child(doc.create_element("TOTAL_MB")).content = "0" + doc.root.add_child(doc.create_element("FREE_MB")).content = "0" + doc.root.add_child(doc.create_element("USED_MB")).content = "0" - @db[:datastore_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => row[:cid]) + @db[:datastore_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => row[:cid]) + end end @db.run "DROP TABLE old_datastore_pool;" + log_time() + return true end end diff --git a/src/onedb/4.1.80_to_4.2.0.rb b/src/onedb/shared/4.1.80_to_4.2.0.rb similarity index 100% rename from src/onedb/4.1.80_to_4.2.0.rb rename to src/onedb/shared/4.1.80_to_4.2.0.rb diff --git a/src/onedb/shared/4.2.0_to_4.3.80.rb b/src/onedb/shared/4.2.0_to_4.3.80.rb new file mode 100644 index 0000000000..be408b512c --- /dev/null +++ b/src/onedb/shared/4.2.0_to_4.3.80.rb @@ -0,0 +1,434 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +require 'rexml/document' +require 'nokogiri' + +TM_MAD_CONF = { + "dummy" => { + :ln_target => "NONE", + :clone_target => "SYSTEM" + }, + "lvm" => { + :ln_target => "NONE", + :clone_target => "SELF" + }, + "shared" => { + :ln_target => "NONE", + :clone_target => "SYSTEM" + }, + "shared_lvm" => { + :ln_target => "SYSTEM", + :clone_target => "SYSTEM" + }, + "qcow2" => { + :ln_target => "NONE", + :clone_target => "SYSTEM" + }, + "ssh" => { + :ln_target => "SYSTEM", + :clone_target => "SYSTEM" + }, + "vmfs" => { + :ln_target => "NONE", + :clone_target => "SYSTEM" + }, + "iscsi" => { + :ln_target => "NONE", + :clone_target => "SELF" + }, + "ceph" => { + :ln_target => "NONE", + :clone_target => "SELF" + } +} + +class String + def red + colorize(31) + end + +private + + def colorize(color_code) + "\e[#{color_code}m#{self}\e[0m" + end +end + +module Migrator + def db_version + "4.3.80" + end + + def one_version + "OpenNebula 4.3.80" + end + + def up + + init_log_time() + + ######################################################################## + # Feature #1742 & #1612 + ######################################################################## + + @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" + @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.transaction do + @db.fetch("SELECT * FROM old_user_pool") do |row| + 
doc = Nokogiri::XML(row[:body]) + + g_elem = doc.create_element("GROUPS") + g_elem.add_child(doc.create_element("ID")).content = row[:gid].to_s + + doc.root.add_child(g_elem) + + # oneadmin does not have quotas + if row[:oid] != 0 + redo_vm_quotas(doc, "uid=#{row[:oid]}") + end + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_user_pool;" + + log_time() + + ######################################################################## + # Feature #1612 + ######################################################################## + + @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" + @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.transaction do + # oneadmin group does not have quotas + @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + + @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) + + redo_vm_quotas(doc, "gid=#{row[:oid]}") + + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_group_pool;" + + log_time() + + ######################################################################## + # Bug #2330 & Feature #1678 + ######################################################################## + + @db.run "ALTER 
TABLE datastore_pool RENAME TO old_datastore_pool;" + @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" + + #tm_mads = {} + @db.transaction do + @db.fetch("SELECT * FROM old_datastore_pool") do |row| + doc = REXML::Document.new(row[:body]) + + doc.root.each_element("TEMPLATE/HOST") do |e| + e.name = "BRIDGE_LIST" + end + + tm_mad = "" + doc.root.each_element("TM_MAD"){ |e| tm_mad = e.text } + + type = 0 + doc.root.each_element("TYPE"){ |e| type = e.text.to_i } + + if (type == 1) # System DS + doc.root.each_element("TEMPLATE") do |e| + e.add_element("SHARED").text = + (tm_mad == "ssh" ? "NO" : "YES") + end + else + #tm_mads[row[:oid].to_i] = tm_mad + + conf = TM_MAD_CONF[tm_mad] + + if conf.nil? + puts + puts "ATTENTION: manual intervention required".red + puts <<-END +The Datastore ##{row[:oid]} #{row[:name]} is using the +custom TM MAD '#{tm_mad}'. 
You will need to define new +configuration parameters in oned.conf for this driver, see +http://opennebula.org/documentation:rel4.4:upgrade + END + else + doc.root.each_element("TEMPLATE") do |e| + e.add_element("LN_TARGET").text = conf[:ln_target] + e.add_element("CLONE_TARGET").text = conf[:clone_target] + end + end + end + + @db[:datastore_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => row[:cid]) + end + end + + @db.run "DROP TABLE old_datastore_pool;" + + log_time() + + ######################################################################## + # Feature #2392 + ######################################################################## + + @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" + @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" + + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + doc = Nokogiri::XML(row[:body]) + + doc.root.xpath("HISTORY_RECORDS/HISTORY").each do |e| + update_history(e) + end + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + end + + @db.run "DROP TABLE old_vm_pool;" + + log_time() + + @db.run "ALTER TABLE history RENAME TO old_history;" + @db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));" + + @db.transaction do + @db.fetch("SELECT * FROM old_history") do |row| + doc = Nokogiri::XML(row[:body]) + + 
doc.root.xpath("/HISTORY").each do |e| + update_history(e) + end + + @db[:history].insert( + :vid => row[:vid], + :seq => row[:seq], + :body => doc.root.to_s, + :stime => row[:stime], + :etime => row[:etime]) + end + end + + @db.run "DROP TABLE old_history;" + + log_time() + + ######################################################################## + # Feature #1678 + ######################################################################## + + @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" + @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" + + @db.transaction do + @db.fetch("SELECT * FROM old_host_pool") do |row| + doc = Nokogiri::XML(row[:body]) + + doc.root.at_xpath("HOST_SHARE") + .add_child(doc.create_element("DATASTORES")) + + @db[:host_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :state => row[:state], + :last_mon_time => row[:last_mon_time], + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => row[:cid]) + end + end + + @db.run "DROP TABLE old_host_pool;" + + log_time() + + # TODO: + # For Feature #1678, VMs have new disk elements: + # VM/DISK/CLONE_TARGET + # VM/DISK/LN_TARGET + # VM/DISK/SIZE + # + # These elements are only used to schedule new deployments, so if we + # don't add them it will only affect automatic deployment of VMs + # recreated (onevm delete --recreate). Manual deployments will still + # work without problems. 
+ + return true + end + + def redo_vm_quotas(doc, where_filter) + cpu_limit = "-1" + mem_limit = "-1" + vms_limit = "-1" + vol_limit = "-1" + + doc.root.xpath("VM_QUOTA/VM/CPU").each { |e| + cpu_limit = e.text + } + + doc.root.xpath("VM_QUOTA/VM/MEMORY").each { |e| + mem_limit = e.text + } + + doc.root.xpath("VM_QUOTA/VM/VMS").each { |e| + vms_limit = e.text + } + + doc.root.xpath("VM_QUOTA").each { |e| + e.remove + } + + vm_quota = doc.root.add_child(doc.create_element("VM_QUOTA")) + + # VM quotas + cpu_used = 0 + mem_used = 0 + vms_used = 0 + vol_used = 0 + + @db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row| + vmdoc = Nokogiri::XML(vm_row[:body]) + + # VM quotas + vmdoc.root.xpath("TEMPLATE/CPU").each { |e| + cpu_used += e.text.to_f + } + + vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e| + mem_used += e.text.to_i + } + + vmdoc.root.xpath("TEMPLATE/DISK").each { |e| + type = "" + + e.xpath("TYPE").each { |t_elem| + type = t_elem.text.upcase + } + + if ( type == "SWAP" || type == "FS") + e.xpath("SIZE").each { |size_elem| + vol_used += size_elem.text.to_i + } + end + } + + vms_used += 1 + end + + if (vms_used != 0 || + cpu_limit != "-1" || mem_limit != "-1" || vms_limit != "-1" || vol_limit != "-1" ) + + # VM quotas + vm_elem = vm_quota.add_child(doc.create_element("VM")) + + vm_elem.add_child(doc.create_element("CPU")).content = cpu_limit + vm_elem.add_child(doc.create_element("CPU_USED")).content = sprintf('%.2f', cpu_used) + + vm_elem.add_child(doc.create_element("MEMORY")).content = mem_limit + vm_elem.add_child(doc.create_element("MEMORY_USED")).content = mem_used.to_s + + vm_elem.add_child(doc.create_element("VMS")).content = vms_limit + vm_elem.add_child(doc.create_element("VMS_USED")).content = vms_used.to_s + + vm_elem.add_child(doc.create_element("VOLATILE_SIZE")).content = vol_limit + vm_elem.add_child(doc.create_element("VOLATILE_SIZE_USED")).content = vol_used.to_s + end + end + + def update_history(history_elem) + hid 
= nil + + history_elem.xpath("HID").each do |e| + hid = e.text + end + + new_elem = history_elem.add_child( + history_elem.document.create_element("CID")) + + new_elem.content = "-1" # Cluster None + + if hid.nil? + return + end + + @db.fetch("SELECT cid FROM host_pool WHERE oid = #{hid}") do |row| + new_elem.content = row[:cid].to_s + end + end + +end diff --git a/src/onedb/4.3.80_to_4.3.85.rb b/src/onedb/shared/4.3.80_to_4.3.85.rb similarity index 100% rename from src/onedb/4.3.80_to_4.3.85.rb rename to src/onedb/shared/4.3.80_to_4.3.85.rb diff --git a/src/onedb/4.3.85_to_4.3.90.rb b/src/onedb/shared/4.3.85_to_4.3.90.rb similarity index 100% rename from src/onedb/4.3.85_to_4.3.90.rb rename to src/onedb/shared/4.3.85_to_4.3.90.rb diff --git a/src/onedb/4.3.90_to_4.4.0.rb b/src/onedb/shared/4.3.90_to_4.4.0.rb similarity index 100% rename from src/onedb/4.3.90_to_4.4.0.rb rename to src/onedb/shared/4.3.90_to_4.4.0.rb diff --git a/src/onedb/shared/4.4.0_to_4.4.1.rb b/src/onedb/shared/4.4.0_to_4.4.1.rb new file mode 100644 index 0000000000..781169753e --- /dev/null +++ b/src/onedb/shared/4.4.0_to_4.4.1.rb @@ -0,0 +1,29 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +module Migrator + def db_version + "4.4.1" + end + + def one_version + "OpenNebula 4.4.1" + end + + def up + return true + end +end diff --git a/src/onedb/shared/4.4.1_to_4.5.80.rb b/src/onedb/shared/4.4.1_to_4.5.80.rb new file mode 100644 index 0000000000..a6bd9be56a --- /dev/null +++ b/src/onedb/shared/4.4.1_to_4.5.80.rb @@ -0,0 +1,152 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +require 'nokogiri' + +module Migrator + def db_version + "4.5.80" + end + + def one_version + "OpenNebula 4.5.80" + end + + def up + + init_log_time() + + @db.run "ALTER TABLE acl RENAME TO old_acl;" + @db.run "CREATE TABLE acl (oid INT PRIMARY KEY, user BIGINT, resource BIGINT, rights BIGINT, zone BIGINT, UNIQUE(user, resource, rights, zone));" + + @db.transaction do + @db.fetch("SELECT * FROM old_acl") do |row| + @db[:acl].insert( + :oid => row[:oid], + :user => row[:user], + :resource => row[:resource], + :rights => row[:rights], + :zone => 4294967296) + end + end + + @db.run "DROP TABLE old_acl;" + + log_time() + + # Move USER/QUOTA to user_quotas table + + @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" + @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.run "CREATE TABLE user_quotas (user_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + @db.fetch("SELECT * FROM old_user_pool") do |row| + doc = Nokogiri::XML(row[:body]) + + quotas_doc = extract_quotas(doc) + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + + @db[:user_quotas].insert( + :user_oid => row[:oid], + :body => quotas_doc.root.to_s) + end + end + + @db.run "DROP TABLE old_user_pool;" + + log_time() + + # GROUP/RESOURCE_PROVIDER is not needed + + # Move GROUP/QUOTA to group_quotas table + # Add GROUP/TEMPLATE + + @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" + @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.run 
"CREATE TABLE group_quotas (group_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + @db.fetch("SELECT * FROM old_group_pool") do |row| + doc = Nokogiri::XML(row[:body]) + + quotas_doc = extract_quotas(doc) + + doc.root.add_child(doc.create_element("TEMPLATE")) + + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + + @db[:group_quotas].insert( + :group_oid => row[:oid], + :body => quotas_doc.root.to_s) + end + end + + @db.run "DROP TABLE old_group_pool;" + + log_time() + + # Default ZONE + @db.run "CREATE TABLE zone_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + @db.run "INSERT INTO zone_pool VALUES(0,'OpenNebula','0OpenNebula',0,0,1,0,0);" + + @db.run "INSERT INTO pool_control VALUES('zone_pool',99);" + + # New versioning table + @db.run "CREATE TABLE local_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256), is_slave BOOLEAN);" + @db.run "INSERT INTO local_db_versioning VALUES(0,'#{db_version()}',#{Time.now.to_i},'Database migrated from 4.4.1 to 4.5.80 (OpenNebula 4.5.80) by onedb command.',0);" + + return true + end + + def extract_quotas(doc) + ds_quota = doc.root.at_xpath("DATASTORE_QUOTA").remove + net_quota = doc.root.at_xpath("NETWORK_QUOTA").remove + vm_quota = doc.root.at_xpath("VM_QUOTA").remove + img_quota = doc.root.at_xpath("IMAGE_QUOTA").remove + + quotas_doc = Nokogiri::XML("") + + quotas_doc.root.add_child(quotas_doc.create_element("ID")) + .content = doc.root.at_xpath("ID").text + + quotas_doc.root.add_child(ds_quota) + quotas_doc.root.add_child(net_quota) + quotas_doc.root.add_child(vm_quota) + quotas_doc.root.add_child(img_quota) + + return quotas_doc + end +end diff --git 
a/src/rm/RequestManager.cc b/src/rm/RequestManager.cc index 54d57f33d1..83eb937f23 100644 --- a/src/rm/RequestManager.cc +++ b/src/rm/RequestManager.cc @@ -437,6 +437,7 @@ void RequestManager::register_xml_methods() /* Group related methods */ xmlrpc_c::method * group_allocate_pt; + xmlrpc_c::method * group_update_pt; xmlrpc_c::method * group_delete_pt; xmlrpc_c::method * group_add_provider_pt; xmlrpc_c::method * group_del_provider_pt; @@ -447,6 +448,7 @@ void RequestManager::register_xml_methods() group_delete_pt = new RequestManagerProxy("one.group.delete"); group_add_provider_pt = new RequestManagerProxy("one.group.addprovider"); group_del_provider_pt = new RequestManagerProxy("one.group.delprovider"); + group_update_pt = new RequestManagerProxy("one.group.update"); } else { @@ -454,12 +456,14 @@ void RequestManager::register_xml_methods() group_delete_pt = new GroupDelete(); group_add_provider_pt = new GroupAddProvider(); group_del_provider_pt = new GroupDelProvider(); + group_update_pt = new GroupUpdateTemplate(); } xmlrpc_c::methodPtr group_allocate(group_allocate_pt); xmlrpc_c::methodPtr group_delete(group_delete_pt); xmlrpc_c::methodPtr group_add_provider(group_add_provider_pt); xmlrpc_c::methodPtr group_del_provider(group_del_provider_pt); + xmlrpc_c::methodPtr group_update(group_update_pt); xmlrpc_c::methodPtr group_info(new GroupInfo()); xmlrpc_c::methodPtr group_set_quota(new GroupSetQuota()); @@ -467,12 +471,13 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr group_get_default_quota(new GroupQuotaInfo()); xmlrpc_c::methodPtr group_set_default_quota(new GroupQuotaUpdate()); - RequestManagerRegistry.addMethod("one.group.allocate", group_allocate); - RequestManagerRegistry.addMethod("one.group.delete", group_delete); - RequestManagerRegistry.addMethod("one.group.info", group_info); - RequestManagerRegistry.addMethod("one.group.quota", group_set_quota); + RequestManagerRegistry.addMethod("one.group.allocate", group_allocate); + 
RequestManagerRegistry.addMethod("one.group.delete", group_delete); + RequestManagerRegistry.addMethod("one.group.info", group_info); + RequestManagerRegistry.addMethod("one.group.quota", group_set_quota); RequestManagerRegistry.addMethod("one.group.addprovider",group_add_provider); RequestManagerRegistry.addMethod("one.group.delprovider",group_del_provider); + RequestManagerRegistry.addMethod("one.group.update", group_update); RequestManagerRegistry.addMethod("one.grouppool.info", grouppool_info); diff --git a/src/rm/RequestManagerDelete.cc b/src/rm/RequestManagerDelete.cc index 57ec0211a9..fd1afa404d 100644 --- a/src/rm/RequestManagerDelete.cc +++ b/src/rm/RequestManagerDelete.cc @@ -302,3 +302,18 @@ int UserDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) return rc; } + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +int ZoneDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) +{ + int rc = RequestManagerDelete::drop(oid, object, error_msg); + + if ( rc == 0 ) + { + aclm->del_zid_rules(oid); + } + + return rc; +} diff --git a/src/rm/RequestManagerGroup.cc b/src/rm/RequestManagerGroup.cc index 785c3d1d6b..985caf6382 100644 --- a/src/rm/RequestManagerGroup.cc +++ b/src/rm/RequestManagerGroup.cc @@ -197,8 +197,6 @@ void GroupEditProvider::request_execute( return; } - edit_acl_rules(group_id, zone_id, cluster_id, error_str); - success_response(cluster_id, att); } @@ -214,154 +212,8 @@ int GroupAddProvider::edit_resource_provider( /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int GroupAddProvider::edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg) -{ - int rc = 0; - - long long mask_prefix; - - if (cluster_id == ClusterPool::ALL_RESOURCES) - { - mask_prefix = AclRule::ALL_ID; - } - 
else - { - mask_prefix = AclRule::CLUSTER_ID | cluster_id; - } - - // @ HOST/% MANAGE # - rc += aclm->add_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::HOST, - - AuthRequest::MANAGE, - - AclRule::INDIVIDUAL_ID | - zone_id, - - error_msg); - - // @ DATASTORE+NET/% USE # - rc += aclm->add_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::DATASTORE | - PoolObjectSQL::NET, - - AuthRequest::USE, - - AclRule::INDIVIDUAL_ID | - zone_id, - - error_msg); - - // @ ZONE/# USE * - rc += aclm->add_rule( - AclRule::GROUP_ID | - group_id, - - PoolObjectSQL::ZONE | - AclRule::INDIVIDUAL_ID | - zone_id, - - AuthRequest::USE, - - AclRule::ALL_ID, - - error_msg); - - if (rc != 0) - { - return -1; - } - - return 0; -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - int GroupDelProvider::edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg) { return group->del_resource_provider(zone_id, cluster_id, error_msg); } - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -int GroupDelProvider::edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg) -{ - int rc = 0; - - long long mask_prefix; - - if (cluster_id == ClusterPool::ALL_RESOURCES) - { - mask_prefix = AclRule::ALL_ID; - } - else - { - mask_prefix = AclRule::CLUSTER_ID | cluster_id; - } - - // @ HOST/% MANAGE # - rc += aclm->del_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::HOST, - - AuthRequest::MANAGE, - - AclRule::INDIVIDUAL_ID | - zone_id, - - error_msg); - - // @ DATASTORE+NET/% USE # - rc += aclm->del_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::DATASTORE | - PoolObjectSQL::NET, - - AuthRequest::USE, - - AclRule::INDIVIDUAL_ID | - 
zone_id, - - error_msg); - - // @ ZONE/# USE * - rc += aclm->del_rule( - AclRule::GROUP_ID | - group_id, - - PoolObjectSQL::ZONE | - AclRule::INDIVIDUAL_ID | - zone_id, - - AuthRequest::USE, - - AclRule::ALL_ID, - - error_msg); - - if (rc != 0) - { - return -1; - } - - return 0; -} diff --git a/src/rm/RequestManagerSystem.cc b/src/rm/RequestManagerSystem.cc index a324569c58..a9bc1582fd 100644 --- a/src/rm/RequestManagerSystem.cc +++ b/src/rm/RequestManagerSystem.cc @@ -30,7 +30,7 @@ void SystemVersion::request_execute(xmlrpc_c::paramList const& paramList, // Should we make the version call accessible even // if no user is provided? - success_response(Nebula::instance().db_version(), att); + success_response(Nebula::instance().code_version(), att); return; } diff --git a/src/rm/RequestManagerUser.cc b/src/rm/RequestManagerUser.cc index 03ab3f6316..0a98d057ae 100644 --- a/src/rm/RequestManagerUser.cc +++ b/src/rm/RequestManagerUser.cc @@ -15,7 +15,6 @@ /* -------------------------------------------------------------------------- */ #include "RequestManagerUser.h" -#include "NebulaUtil.h" using namespace std; @@ -70,11 +69,6 @@ int UserChangePassword::user_action(int user_id, return -1; } - if (user->get_auth_driver() == UserPool::CORE_AUTH) - { - new_pass = one_util::sha1_digest(new_pass); - } - int rc = user->set_password(new_pass, error_str); if ( rc == 0 ) @@ -125,20 +119,20 @@ int UserChangeAuth::user_action(int user_id, return -1; } - if ( !new_pass.empty() ) - { - if ( new_auth == UserPool::CORE_AUTH) - { - new_pass = one_util::sha1_digest(new_pass); - } + string old_auth = user->get_auth_driver(); - // The password may be invalid, try to change it first + rc = user->set_auth_driver(new_auth, error_str); + + if ( rc == 0 && !new_pass.empty() ) + { rc = user->set_password(new_pass, error_str); - } - if ( rc == 0 ) - { - rc = user->set_auth_driver(new_auth, error_str); + if (rc != 0) + { + string tmp_str; + + user->set_auth_driver(old_auth, tmp_str); + } } if ( 
rc == 0 ) diff --git a/src/scheduler/etc/sched.conf b/src/scheduler/etc/sched.conf index 05e5897b37..0156ca8bbb 100644 --- a/src/scheduler/etc/sched.conf +++ b/src/scheduler/etc/sched.conf @@ -22,9 +22,6 @@ # # LIVE_RESCHEDS: Perform live (1) or cold migrations (0) when rescheduling a VM # -# HYPERVISOR_MEM: Fraction of total MEMORY reserved for the hypervisor. -# E.g. 0.1 means that only 90% of the total MEMORY will be used -# # DEFAULT_SCHED: Definition of the default scheduling algorithm # - policy: # 0 = Packing. Heuristic that minimizes the number of hosts in use by @@ -71,8 +68,6 @@ MAX_HOST = 1 LIVE_RESCHEDS = 0 -HYPERVISOR_MEM = 0.1 - DEFAULT_SCHED = [ policy = 1 ] diff --git a/src/scheduler/include/HostPoolXML.h b/src/scheduler/include/HostPoolXML.h index d2378b1a9e..49a8e3dd86 100644 --- a/src/scheduler/include/HostPoolXML.h +++ b/src/scheduler/include/HostPoolXML.h @@ -28,10 +28,7 @@ class HostPoolXML : public PoolXML { public: - HostPoolXML(Client* client, float mem):PoolXML(client) - { - HostXML::set_hypervisor_mem(mem); - }; + HostPoolXML(Client* client):PoolXML(client) {}; ~HostPoolXML(){}; diff --git a/src/scheduler/include/HostXML.h b/src/scheduler/include/HostXML.h index 3aabe433f4..c9f5bc0dd2 100644 --- a/src/scheduler/include/HostXML.h +++ b/src/scheduler/include/HostXML.h @@ -113,15 +113,6 @@ public: */ int search(const char *name, int& value); - /** - * Sets the memory fraction reserved for the hypervisor. This function - * should be called before using the host pool. 
- */ - static void set_hypervisor_mem(float mem) - { - hypervisor_mem = 1.0 - mem; - }; - /** * Checks if the host is a remote public cloud * @return true if the host is a remote public cloud @@ -151,8 +142,6 @@ private: bool public_cloud; // Configuration attributes - static float hypervisor_mem; /**< Fraction of memory for the VMs */ - static const char *host_paths[]; /**< paths for search function */ static int host_num_paths; /**< number of paths*/ diff --git a/src/scheduler/include/Scheduler.h b/src/scheduler/include/Scheduler.h index 1a659f28d0..f05131c863 100644 --- a/src/scheduler/include/Scheduler.h +++ b/src/scheduler/include/Scheduler.h @@ -60,7 +60,6 @@ protected: machines_limit(0), dispatch_limit(0), host_dispatch_limit(0), - hypervisor_mem(0), client(0) { am.addListener(this); @@ -171,11 +170,6 @@ private: */ unsigned int host_dispatch_limit; - /** - * Memory reserved for the hypervisor - */ - float hypervisor_mem; - /** * OpenNebula zone id. */ diff --git a/src/scheduler/src/pool/HostXML.cc b/src/scheduler/src/pool/HostXML.cc index 748a2617fb..5fc38f6941 100644 --- a/src/scheduler/src/pool/HostXML.cc +++ b/src/scheduler/src/pool/HostXML.cc @@ -22,9 +22,6 @@ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ - -float HostXML::hypervisor_mem; - int HostXML::host_num_paths = 4; const char *HostXML::host_paths[] = { @@ -51,10 +48,6 @@ void HostXML::init_attributes() running_vms = atoll(((*this)["/HOST/HOST_SHARE/RUNNING_VMS"])[0].c_str()); - //Reserve memory for the hypervisor - max_mem = static_cast(hypervisor_mem * static_cast(max_mem)); - - public_cloud = false; vector public_cloud_vector = (*this)["/HOST/TEMPLATE/PUBLIC_CLOUD"]; diff --git a/src/scheduler/src/sched/Scheduler.cc b/src/scheduler/src/sched/Scheduler.cc index 608231dcab..04dea96140 100644 --- a/src/scheduler/src/sched/Scheduler.cc +++ b/src/scheduler/src/sched/Scheduler.cc @@ 
-122,8 +122,6 @@ void Scheduler::start() conf.get("LIVE_RESCHEDS", live_rescheds); - conf.get("HYPERVISOR_MEM", hypervisor_mem); - // ----------------------------------------------------------- // Log system & Configuration File // ----------------------------------------------------------- @@ -285,7 +283,7 @@ void Scheduler::start() // Pools // ------------------------------------------------------------------------- - hpool = new HostPoolXML(client, hypervisor_mem); + hpool = new HostPoolXML(client); clpool = new ClusterPoolXML(client); vmpool = new VirtualMachinePoolXML(client,machines_limit,(live_rescheds==1)); diff --git a/src/scheduler/src/sched/SchedulerTemplate.cc b/src/scheduler/src/sched/SchedulerTemplate.cc index e4d32e91e8..c1e9fa9a7b 100644 --- a/src/scheduler/src/sched/SchedulerTemplate.cc +++ b/src/scheduler/src/sched/SchedulerTemplate.cc @@ -44,7 +44,6 @@ void SchedulerTemplate::set_conf_default() # DEFAULT_SCHED # DEFAULT_DS_SCHED # LIVE_RESCHEDS -# HYPERVISOR_MEM # LOG #------------------------------------------------------------------------------- */ @@ -104,12 +103,6 @@ void SchedulerTemplate::set_conf_default() vattribute = new VectorAttribute("DEFAULT_DS_SCHED",vvalue); conf_default.insert(make_pair(vattribute->name(),vattribute)); - //HYPERVISOR_MEM - value = "0.1"; - - attribute = new SingleAttribute("HYPERVISOR_MEM",value); - conf_default.insert(make_pair(attribute->name(),attribute)); - //LOG CONFIGURATION vvalue.clear(); vvalue.insert(make_pair("SYSTEM","file")); diff --git a/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb b/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb index eae06612ff..a5e7576902 100644 --- a/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb @@ -37,6 +37,7 @@ module OpenNebulaJSON rc = case action_hash['perform'] when "chown" then self.chown(action_hash['params']) + when "update" then self.update(action_hash['params']) when "set_quota" then 
self.set_quota(action_hash['params']) when "add_provider" then self.add_provider(action_hash['params']) when "del_provider" then self.del_provider(action_hash['params']) @@ -51,6 +52,10 @@ module OpenNebulaJSON super(params['owner_id'].to_i) end + def update(params=Hash.new) + super(params['template_raw']) + end + def set_quota(params=Hash.new) quota_json = params['quotas'] quota_template = template_to_str(quota_json) diff --git a/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb b/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb index 181dd3d08c..6e26ed004d 100644 --- a/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. 
You may obtain # diff --git a/src/sunstone/models/SunstoneViews.rb b/src/sunstone/models/SunstoneViews.rb index 2326a9975d..66fa6700ab 100644 --- a/src/sunstone/models/SunstoneViews.rb +++ b/src/sunstone/models/SunstoneViews.rb @@ -49,9 +49,26 @@ class SunstoneViews end def available_views(user_name, group_name) - available_views = @views_config['users'][user_name] if @views_config['users'] - available_views ||= @views_config['groups'][group_name] if @views_config['groups'] - available_views ||= @views_config['default'] + user = OpenNebula::User.new_with_id( + OpenNebula::User::SELF, + $cloud_auth.client(user_name)) + user.info + + group = OpenNebula::Group.new_with_id(user.gid, $cloud_auth.client(user_name)) + group.info + + available_views = Array.new + if group["TEMPLATE/SUNSTONE_VIEWS"] + available_views = group["TEMPLATE/SUNSTONE_VIEWS"].split(",") + end + + static_views = @views_config['users'][user_name] if @views_config['users'] + static_views ||= @views_config['groups'][group_name] if @views_config['groups'] + static_views ||= @views_config['default'] + + available_views.concat(static_views) + available_views.select!{|view_name| @views[view_name]} + available_views.uniq! 
return available_views end diff --git a/src/sunstone/public/js/opennebula.js b/src/sunstone/public/js/opennebula.js index e5497c875c..1f70137a08 100644 --- a/src/sunstone/public/js/opennebula.js +++ b/src/sunstone/public/js/opennebula.js @@ -932,6 +932,13 @@ var OpenNebula = { } }); }, + "update": function(params){ + var action_obj = {"template_raw" : params.data.extra_param }; + OpenNebula.Action.simple_action(params, + OpenNebula.Group.resource, + "update", + action_obj); + }, "set_quota" : function(params){ var action_obj = { quotas : params.data.extra_param }; OpenNebula.Action.simple_action(params,OpenNebula.Group.resource,"set_quota",action_obj); diff --git a/src/sunstone/public/js/plugins/clusters-tab.js b/src/sunstone/public/js/plugins/clusters-tab.js index ac99ae6ebf..687d738bb3 100644 --- a/src/sunstone/public/js/plugins/clusters-tab.js +++ b/src/sunstone/public/js/plugins/clusters-tab.js @@ -848,7 +848,6 @@ function updateClusterDatastoresView(request, list){ var list_array = []; $.each(list,function(){ - if(this.DATASTORE.ID!=0) list_array.push( datastoreElementArray(this)); }); diff --git a/src/sunstone/public/js/plugins/groups-tab.js b/src/sunstone/public/js/plugins/groups-tab.js index 3c261a06f7..76db980c32 100644 --- a/src/sunstone/public/js/plugins/groups-tab.js +++ b/src/sunstone/public/js/plugins/groups-tab.js @@ -239,6 +239,16 @@ var group_actions = { error: onError }, + "Group.update_template" : { + type: "single", + call: OpenNebula.Group.update, + callback: function(request) { + notifyMessage("Template updated correctly"); + Sunstone.runAction('Group.showinfo',request.request.data[0][0]); + }, + error: onError + }, + "Group.delete" : { type: "multiple", call : OpenNebula.Group.del, @@ -381,6 +391,7 @@ var group_buttons = { }; var group_info_panel = { + }; var groups_tab = { @@ -573,6 +584,36 @@ function fromJSONtoProvidersTable(group_info){ function updateGroupInfo(request,group){ var info = group.GROUP; + var info_tab = { + title: 
tr("Information"), + content: + '
\ +
\ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
'+tr("Group")+' - '+info.NAME+'
'+tr("ID")+''+info.ID+'
'+tr("Name")+''+info.NAME+'
\ +
\ +
' + + insert_extended_template_table(info.TEMPLATE, + "Group", + info.ID, + "Configuration & Tags") + + '
\ +
' + } + var default_group_quotas = Quotas.default_quotas(info.DEFAULT_GROUP_QUOTAS); var quotas_tab_html = '
' + Quotas.vms(info, default_group_quotas) + '
'; quotas_tab_html += '
' + Quotas.cpu(info, default_group_quotas) + '
'; @@ -617,7 +658,7 @@ function updateGroupInfo(request,group){ ' }; - + Sunstone.updateInfoPanelTab("group_info_panel","group_info_tab",info_tab); Sunstone.updateInfoPanelTab("group_info_panel","group_quotas_tab",quotas_tab); Sunstone.updateInfoPanelTab("group_info_panel","group_providers_tab",providers_tab); Sunstone.popUpInfoPanel("group_info_panel", 'groups-tab'); diff --git a/src/sunstone/public/js/plugins/images-tab.js b/src/sunstone/public/js/plugins/images-tab.js index 0b0d844a8b..46e55b6679 100644 --- a/src/sunstone/public/js/plugins/images-tab.js +++ b/src/sunstone/public/js/plugins/images-tab.js @@ -491,11 +491,6 @@ var image_buttons = { layout: "del", text: tr("Delete") }, - //"Image.help" : { - // type: "action", - // text: '?', - // alwaysActive: true - //} } var image_info_panel = { diff --git a/src/sunstone/public/js/plugins/templates-tab.js b/src/sunstone/public/js/plugins/templates-tab.js index 7efb519615..ea510b5a40 100644 --- a/src/sunstone/public/js/plugins/templates-tab.js +++ b/src/sunstone/public/js/plugins/templates-tab.js @@ -2353,6 +2353,45 @@ function add_osTab(dialog) { ''+ ''+ ''+ + '
'+ + '
'+ + ''+ + ''+ + '
'+ + '
'+ + ''+ + ''+ + '
'+ + '
'+ + '
'+ + '
'+ + ''+ + ''+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ '
'+ '
'+ '
diff --git a/src/sunstone/views/login.erb b/src/sunstone/views/login.erb index 2f1921db7d..b04a3720df 100644 --- a/src/sunstone/views/login.erb +++ b/src/sunstone/views/login.erb @@ -26,7 +26,7 @@ diff --git a/src/tm_mad/ceph/clone b/src/tm_mad/ceph/clone index 4ab9a63e0b..4b819a50fd 100755 --- a/src/tm_mad/ceph/clone +++ b/src/tm_mad/ceph/clone @@ -47,11 +47,40 @@ SRC_PATH=`arg_path $SRC` DISK_ID=$(echo $DST|awk -F. '{print $NF}') RBD_DST="${SRC_PATH}-${VM_ID}-${DISK_ID}" +RBD_SNAP="${VM_ID}-${DISK_ID}" + +#------------------------------------------------------------------------------- +# Get Image information +#------------------------------------------------------------------------------- + +XPATH="${DRIVER_PATH}/../../datastore/xpath.rb --stdin" + +unset i j XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <(onevm show -x $VM_ID| $XPATH \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/RBD_FORMAT) + +RBD_FORMAT="${XPATH_ELEMENTS[j++]}" #------------------------------------------------------------------------------- # Clone the image #------------------------------------------------------------------------------- -ssh_exec_and_log "$DST_HOST" "$RBD copy $SRC_PATH $RBD_DST" \ +CLONE_CMD=$(cat </one---" RBD_SRC="${SRC}-${VM_ID}-${DISK_ID}" +RBD_SNAP="${VM_ID}-${DISK_ID}" #------------------------------------------------------------------------------- # Delete the device @@ -87,7 +90,19 @@ log "Deleting $DST_PATH" # Note that this command, as opposed to the rest of $RBD commands in this set of # drivers, is executed in the worker node and not in the CEPH frontend. 
-ssh_exec_and_log "$DST_HOST" "$RBD rm $RBD_SRC" \ +DELETE_CMD=$(cat </one---" RBD_DST="${RBD_SRC}-${VM_ID}-${DISK_ID}" +RBD_SNAP="${VM_ID}-${DISK_ID}" #------------------------------------------------------------------------------- # Move the image back to the datastore @@ -76,7 +79,20 @@ RBD_DST="${RBD_SRC}-${VM_ID}-${DISK_ID}" log "Dumping $RBD_DST to $DST" -ssh_exec_and_log "$SRC_HOST" "$RBD rename $RBD_DST $DST" \ +DUMP_CMD=$(cat < boots; @@ -137,20 +138,29 @@ int LibVirtDriver::deployment_description_kvm( const VectorAttribute * graphics; - string listen = ""; - string port = ""; - string passwd = ""; - string keymap = ""; + string listen = ""; + string port = ""; + string passwd = ""; + string keymap = ""; + string spice_options = ""; const VectorAttribute * input; const VectorAttribute * features; - bool pae = false; - bool acpi = false; + bool pae = false; + bool acpi = false; + bool apic = false; + bool hyperv = false; + bool localtime = false; - int pae_found = -1; - int acpi_found = -1; + int pae_found = -1; + int acpi_found = -1; + int apic_found = -1; + int hyperv_found = -1; + int localtime_found = -1; + + string hyperv_options = ""; const VectorAttribute * raw; string default_raw; @@ -240,6 +250,7 @@ int LibVirtDriver::deployment_description_kvm( kernel_cmd = os->vector_value("KERNEL_CMD"); bootloader = os->vector_value("BOOTLOADER"); arch = os->vector_value("ARCH"); + machine = os->vector_value("MACHINE"); } } @@ -253,7 +264,19 @@ int LibVirtDriver::deployment_description_kvm( } } - file << "\t\thvm" << endl; + if ( machine.empty() ) + { + get_default("OS", "MACHINE", machine); + } + + file << "\t\thvm" << endl; if ( kernel.empty() ) { @@ -712,6 +735,16 @@ int LibVirtDriver::deployment_description_kvm( } file << "/>" << endl; + + if ( type == "spice" ) + { + get_default("SPICE_OPTIONS", spice_options); + + if ( spice_options != "" ) + { + file << "\t\t" << spice_options << endl; + } + } } else { @@ -766,8 +799,11 @@ int 
LibVirtDriver::deployment_description_kvm( if ( features != 0 ) { - pae_found = features->vector_value("PAE", pae); - acpi_found = features->vector_value("ACPI", acpi); + pae_found = features->vector_value("PAE", pae); + acpi_found = features->vector_value("ACPI", acpi); + apic_found = features->vector_value("APIC", apic); + hyperv_found = features->vector_value("HYPERV", hyperv); + localtime_found = features->vector_value("LOCALTIME", localtime); } } @@ -781,7 +817,22 @@ int LibVirtDriver::deployment_description_kvm( get_default("FEATURES", "ACPI", acpi); } - if( acpi || pae ) + if ( apic_found != 0 ) + { + get_default("FEATURES", "APIC", apic); + } + + if ( hyperv_found != 0 ) + { + get_default("FEATURES", "HYPERV", hyperv); + } + + if ( localtime_found != 0 ) + { + get_default("FEATURES", "LOCALTIME", localtime); + } + + if ( acpi || pae || apic || hyperv ) { file << "\t" << endl; @@ -795,9 +846,28 @@ int LibVirtDriver::deployment_description_kvm( file << "\t\t" << endl; } + if ( apic ) + { + file << "\t\t" << endl; + } + + if ( hyperv ) + { + get_default("HYPERV_OPTIONS", hyperv_options); + + file << "\t\t" << endl; + file << hyperv_options << endl; + file << "\t\t" << endl; + } + file << "\t" << endl; } + if ( localtime ) + { + file << "\t" << endl; + } + attrs.clear(); // ------------------------------------------------------------------------ diff --git a/src/vmm/XenDriver.cc b/src/vmm/XenDriver.cc index c93104ed0d..3d1828dcc7 100644 --- a/src/vmm/XenDriver.cc +++ b/src/vmm/XenDriver.cc @@ -21,6 +21,12 @@ #include #include + +string on_off_string(bool value) +{ + return value? 
"1" : "0"; +} + int XenDriver::deployment_description( const VirtualMachine * vm, const string& file_name) const @@ -76,6 +82,24 @@ int XenDriver::deployment_description( string passwd = ""; string keymap = ""; + const VectorAttribute * input; + + string bus = ""; + + const VectorAttribute * features; + + bool pae = false; + bool acpi = false; + bool apic = false; + string device_model = ""; + bool localtime = false; + + int pae_found = -1; + int acpi_found = -1; + int apic_found = -1; + int device_model_found = -1; + int localtime_found = -1; + const VectorAttribute * raw; string data; string default_raw; @@ -577,6 +601,119 @@ int XenDriver::deployment_description( attrs.clear(); + // ------------------------------------------------------------------------ + // Input (only usb tablet) + // ------------------------------------------------------------------------ + + if ( vm->get_template_attribute("INPUT",attrs) > 0 ) + { + input = dynamic_cast(attrs[0]); + + if ( input != 0 ) + { + type = input->vector_value("TYPE"); + bus = input->vector_value("BUS"); + + if ( type == "tablet" && bus == "usb" ) + { + file << "usb = 1" << endl; + file << "usbdevice = 'tablet'" << endl; + } + else + { + vm->log("VMM", Log::WARNING, + "Not supported input, only usb tablet, ignored."); + } + } + } + + attrs.clear(); + + // ------------------------------------------------------------------------ + // Features (only for HVM) + // ------------------------------------------------------------------------ + + if ( is_hvm ) + { + num = vm->get_template_attribute("FEATURES",attrs); + + if ( num > 0 ) + { + features = dynamic_cast(attrs[0]); + + if ( features != 0 ) + { + pae_found = features->vector_value("PAE", pae); + acpi_found = features->vector_value("ACPI", acpi); + apic_found = features->vector_value("APIC", apic); + localtime_found = + features->vector_value("LOCALTIME", localtime); + + device_model = features->vector_value("DEVICE_MODEL"); + if ( device_model != "" ) + { + 
device_model_found = 0; + } + } + } + + if ( pae_found != 0 && get_default("FEATURES", "PAE", pae) ) + { + pae_found = 0; + } + + if ( acpi_found != 0 && get_default("FEATURES", "ACPI", acpi) ) + { + acpi_found = 0; + } + + if ( apic_found != 0 && get_default("FEATURES", "APIC", apic) ) + { + apic_found = 0; + } + + if ( device_model_found != 0 ) + { + get_default("FEATURES", "DEVICE_MODEL", device_model); + if ( device_model != "" ) + { + device_model_found = 0; + } + } + + if ( localtime_found != 0 ) + { + get_default("FEATURES", "LOCALTIME", localtime); + } + + if ( pae_found == 0) + { + file << "pae = " << on_off_string(pae) << endl; + } + + if ( acpi_found == 0) + { + file << "acpi = " << on_off_string(acpi) << endl; + } + + if ( apic_found == 0) + { + file << "apic = " << on_off_string(apic) << endl; + } + + if ( device_model_found == 0) + { + file << "device_model = '" << device_model << "'" << endl; + } + + if ( localtime ) + { + file << "localtime = 'yes'" << endl; + } + + attrs.clear(); + } + // ------------------------------------------------------------------------ // Raw XEN attributes // ------------------------------------------------------------------------ diff --git a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index 4f3bb432da..8e068e40f7 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -18,12 +18,13 @@ # (all domains will use these values as defaults). These values can # be overridden in each VM template. 
Valid atributes are: # - emulator -# - os [kernel,initrd,boot,root,kernel_cmd,arch] +# - os [kernel,initrd,boot,root,kernel_cmd,arch,machine] # - vcpu -# - features [acpi, pae] +# - features [acpi, pae, apic, hyperv, localtime] # - disk [driver, cache, io] # - nic [filter, model] # - raw +# - hyperv_options: options used for FEATURES = [ HYPERV = yes ] # NOTE: raw attribute value is appended to that on the VM template #EMULATOR = /usr/libexec/qemu-kvm @@ -31,9 +32,24 @@ #VCPU = 1 OS = [ boot = "hd", arch = "i686" ] -FEATURES = [ PAE = "no", ACPI = "yes" ] +FEATURES = [ PAE = "no", ACPI = "yes", APIC = "no", HYPERV = "no" ] DISK = [ driver = "raw" , cache = "none"] #NIC = [ filter = "clean-traffic", model="virtio" ] #RAW = [ type = "kvm", data = "" ] + +HYPERV_OPTIONS="" + +SPICE_OPTIONS=" + + + + + + + + " + diff --git a/src/vmm_mad/exec/vmm_exec_xen4.conf b/src/vmm_mad/exec/vmm_exec_xen4.conf index 77fd4624a0..eabf2dbc30 100644 --- a/src/vmm_mad/exec/vmm_exec_xen4.conf +++ b/src/vmm_mad/exec/vmm_exec_xen4.conf @@ -20,12 +20,14 @@ # - credit # - os [kernel,initrd,root,kernel_cmd,hvm] # - vcpu +# - features [acpi, pae, apic, device_model, localtime] # - disk[driver] # - nic[model] # - raw #VCPU = 1 #OS = [ kernel="/vmlinuz", initrd="/initrd.img", root="sda1", kernel_cmd="ro", hvm="yes" ] +#FEATURES = [ PAE = "no", ACPI = "yes", APIC = "yes" ] CREDIT = 256 DISK = [ driver = "raw:" ] diff --git a/src/vmm_mad/remotes/poll_xen_kvm.rb b/src/vmm_mad/remotes/poll_xen_kvm.rb index 7014461bf9..5f8d6b1b03 100755 --- a/src/vmm_mad/remotes/poll_xen_kvm.rb +++ b/src/vmm_mad/remotes/poll_xen_kvm.rb @@ -67,15 +67,17 @@ module KVM if names.length!=0 names.each do |vm| dominfo=dom_info(vm) - psinfo=process_info(dominfo['UUID']) + if dominfo + psinfo=process_info(dominfo['UUID']) - info={} - info[:dominfo]=dominfo - info[:psinfo]=psinfo - info[:name]=vm - info[:pid]=psinfo[1] + info={} + info[:dominfo]=dominfo + info[:psinfo]=psinfo + info[:name]=vm + info[:pid]=psinfo[1] - 
vms[vm]=info + vms[vm]=info + end end cpu=get_cpu_info(vms) diff --git a/src/vnm/VirtualNetwork.cc b/src/vnm/VirtualNetwork.cc index c880e39555..07389e9317 100644 --- a/src/vnm/VirtualNetwork.cc +++ b/src/vnm/VirtualNetwork.cc @@ -255,7 +255,16 @@ int VirtualNetwork::insert(SqlDB * db, string& error_str) { ostringstream oss; - oss << "onebr" << oid; + oss << "onebr"; + + if (!vlan_id.empty()) + { + oss << "." << vlan_id; + } + else + { + oss << oid; + } bridge = oss.str(); } diff --git a/src/zone/SConstruct b/src/zone/SConstruct index d646c1953e..c5f2fcfd56 100644 --- a/src/zone/SConstruct +++ b/src/zone/SConstruct @@ -1,7 +1,7 @@ # SConstruct for src/zone # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # diff --git a/src/zone/Zone.cc b/src/zone/Zone.cc index 2796294b09..82aea0b5ee 100644 --- a/src/zone/Zone.cc +++ b/src/zone/Zone.cc @@ -1,5 +1,5 @@ /* ------------------------------------------------------------------------ */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. 
You may obtain */ diff --git a/src/zone/ZonePool.cc b/src/zone/ZonePool.cc index d9aa6f504d..aa001448ca 100644 --- a/src/zone/ZonePool.cc +++ b/src/zone/ZonePool.cc @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------------- */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */