From ec3ec7f768178851fb7bb9f013a36ceb12d96cc0 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 30 Jan 2014 17:35:04 +0100 Subject: [PATCH 01/80] bug #2630: only process domains with information --- src/vmm_mad/remotes/poll_xen_kvm.rb | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/vmm_mad/remotes/poll_xen_kvm.rb b/src/vmm_mad/remotes/poll_xen_kvm.rb index 7014461bf9..5f8d6b1b03 100755 --- a/src/vmm_mad/remotes/poll_xen_kvm.rb +++ b/src/vmm_mad/remotes/poll_xen_kvm.rb @@ -67,15 +67,17 @@ module KVM if names.length!=0 names.each do |vm| dominfo=dom_info(vm) - psinfo=process_info(dominfo['UUID']) + if dominfo + psinfo=process_info(dominfo['UUID']) - info={} - info[:dominfo]=dominfo - info[:psinfo]=psinfo - info[:name]=vm - info[:pid]=psinfo[1] + info={} + info[:dominfo]=dominfo + info[:psinfo]=psinfo + info[:name]=vm + info[:pid]=psinfo[1] - vms[vm]=info + vms[vm]=info + end end cpu=get_cpu_info(vms) From e5d79176d55e1b3d06f714a6ab8bc8e905a9ec1b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 5 Feb 2014 17:07:45 +0100 Subject: [PATCH 02/80] feature #1762: add pae, acpi, apic options to xen --- src/vmm/XenDriver.cc | 69 +++++++++++++++++++++++++++++ src/vmm_mad/exec/vmm_exec_xen4.conf | 2 + 2 files changed, 71 insertions(+) diff --git a/src/vmm/XenDriver.cc b/src/vmm/XenDriver.cc index c93104ed0d..5b0617f7c0 100644 --- a/src/vmm/XenDriver.cc +++ b/src/vmm/XenDriver.cc @@ -21,6 +21,12 @@ #include #include + +string on_off_string(bool value) +{ + return value? "1" : "0"; +} + int XenDriver::deployment_description( const VirtualMachine * vm, const string& file_name) const @@ -76,6 +82,16 @@ int XenDriver::deployment_description( string passwd = ""; string keymap = ""; + const VectorAttribute * features; + + bool pae = false; + bool acpi = false; + bool apic = false; + + int pae_found = -1; + int acpi_found = -1; + int apic_found = -1; + const VectorAttribute * raw; string data; string default_raw; @@ -577,6 +593,59 @@ int XenDriver::deployment_description( attrs.clear(); + // ------------------------------------------------------------------------ + // Features (only for HVM) + // ------------------------------------------------------------------------ + + if ( is_hvm ) + { + num = vm->get_template_attribute("FEATURES",attrs); + + if ( num > 0 ) + { + features = dynamic_cast(attrs[0]); + + if ( features != 0 ) + { + pae_found = features->vector_value("PAE", pae); + acpi_found = features->vector_value("ACPI", acpi); + apic_found = features->vector_value("APIC", apic); + } + } + + if ( pae_found != 0 && get_default("FEATURES", "PAE", pae) ) + { + pae_found = 0; + } + + if ( acpi_found != 0 && get_default("FEATURES", "ACPI", acpi) ) + { + acpi_found = 0; + } + + if ( apic_found != 0 && get_default("FEATURES", "APIC", apic) ) + { + apic_found = 0; + } + + if ( pae_found == 0) + { + file << "pae = " << on_off_string(pae) << endl; + } + + if ( acpi_found == 0) + { + file << "acpi = " << on_off_string(acpi) << endl; + } + + if ( apic_found == 0) + { + file << "apic = " << on_off_string(apic) << endl; + } + + attrs.clear(); + } + // ------------------------------------------------------------------------ // Raw XEN attributes // ------------------------------------------------------------------------ diff --git a/src/vmm_mad/exec/vmm_exec_xen4.conf b/src/vmm_mad/exec/vmm_exec_xen4.conf index 77fd4624a0..3b10fc961d 100644 --- a/src/vmm_mad/exec/vmm_exec_xen4.conf +++ b/src/vmm_mad/exec/vmm_exec_xen4.conf @@ -20,12 +20,14 @@ # - credit # - os 
[kernel,initrd,root,kernel_cmd,hvm] # - vcpu +# - features [acpi, pae, apic] # - disk[driver] # - nic[model] # - raw #VCPU = 1 #OS = [ kernel="/vmlinuz", initrd="/initrd.img", root="sda1", kernel_cmd="ro", hvm="yes" ] +#FEATURES = [ PAE = "no", ACPI = "yes", APIC = "yes" ] CREDIT = 256 DISK = [ driver = "raw:" ] From 6494e8086bd9281bd6c1a3b74ccae66357aee394 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 5 Feb 2014 17:17:08 +0100 Subject: [PATCH 03/80] feature #1762: add apic feature to kvm --- src/vmm/LibVirtDriverKVM.cc | 15 ++++++++++++++- src/vmm_mad/exec/vmm_exec_kvm.conf | 4 ++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index 93e01caa85..fb64704792 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -96,9 +96,11 @@ int LibVirtDriver::deployment_description_kvm( bool pae = false; bool acpi = false; + bool apic = false; int pae_found = -1; int acpi_found = -1; + int apic_found = -1; const VectorAttribute * raw; string default_raw; @@ -720,6 +722,7 @@ int LibVirtDriver::deployment_description_kvm( { pae_found = features->vector_value("PAE", pae); acpi_found = features->vector_value("ACPI", acpi); + apic_found = features->vector_value("APIC", apic); } } @@ -733,7 +736,12 @@ int LibVirtDriver::deployment_description_kvm( get_default("FEATURES", "ACPI", acpi); } - if( acpi || pae ) + if ( apic_found != 0 ) + { + get_default("FEATURES", "APIC", apic); + } + + if( acpi || pae || apic ) { file << "\t" << endl; @@ -747,6 +755,11 @@ int LibVirtDriver::deployment_description_kvm( file << "\t\t" << endl; } + if ( apic ) + { + file << "\t\t" << endl; + } + file << "\t" << endl; } diff --git a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index 4f3bb432da..07764bd559 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -20,7 +20,7 @@ # - emulator # - os [kernel,initrd,boot,root,kernel_cmd,arch] # - vcpu -# - features [acpi, pae] +# - features [acpi, pae, apic] # - disk [driver, cache, io] # - nic [filter, model] # - raw @@ -31,7 +31,7 @@ #VCPU = 1 OS = [ boot = "hd", arch = "i686" ] -FEATURES = [ PAE = "no", ACPI = "yes" ] +FEATURES = [ PAE = "no", ACPI = "yes", APIC = "no" ] DISK = [ driver = "raw" , cache = "none"] From 91dcb0648406cf8197788434d0048eb0cac785b7 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 5 Feb 2014 18:18:44 +0100 Subject: [PATCH 04/80] feature #1762: add tablet usb input to xen --- src/vmm/XenDriver.cc | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/vmm/XenDriver.cc b/src/vmm/XenDriver.cc index 5b0617f7c0..3c29866dc9 100644 --- a/src/vmm/XenDriver.cc +++ b/src/vmm/XenDriver.cc @@ -82,6 +82,10 @@ int XenDriver::deployment_description( string passwd = ""; string keymap = ""; + const VectorAttribute * input; + + string bus = ""; + const VectorAttribute * features; bool pae = false; @@ -593,6 +597,34 @@ int XenDriver::deployment_description( attrs.clear(); + // ------------------------------------------------------------------------ + // Input (only usb tablet) + // ------------------------------------------------------------------------ + + if ( vm->get_template_attribute("INPUT",attrs) > 0 ) + { + input = dynamic_cast(attrs[0]); + + if ( input != 0 ) + { + type = input->vector_value("TYPE"); + bus = input->vector_value("BUS"); + + if ( type == "tablet" && bus == "usb" ) + { + file << "usb = 1" << endl; + file << "usbdevice = 'tablet'" << endl; + } + else + { + 
vm->log("VMM", Log::WARNING, + "Not supported input, only usb tablet, ignored."); + } + } + } + + attrs.clear(); + // ------------------------------------------------------------------------ // Features (only for HVM) // ------------------------------------------------------------------------ From 869b6d5ddd2606851c9792b44c15adfbff9c5e68 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 6 Feb 2014 11:43:33 +0100 Subject: [PATCH 05/80] feature #1762: add device_model to xen features --- src/vmm/XenDriver.cc | 22 ++++++++++++++++++++++ src/vmm_mad/exec/vmm_exec_xen4.conf | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/vmm/XenDriver.cc b/src/vmm/XenDriver.cc index 3c29866dc9..35ff90b7ed 100644 --- a/src/vmm/XenDriver.cc +++ b/src/vmm/XenDriver.cc @@ -91,10 +91,12 @@ int XenDriver::deployment_description( bool pae = false; bool acpi = false; bool apic = false; + string device_model = ""; int pae_found = -1; int acpi_found = -1; int apic_found = -1; + int device_model_found = -1; const VectorAttribute * raw; string data; @@ -642,6 +644,12 @@ int XenDriver::deployment_description( pae_found = features->vector_value("PAE", pae); acpi_found = features->vector_value("ACPI", acpi); apic_found = features->vector_value("APIC", apic); + + device_model = features->vector_value("DEVICE_MODEL"); + if ( device_model != "" ) + { + device_model_found = 0; + } } } @@ -660,6 +668,15 @@ int XenDriver::deployment_description( apic_found = 0; } + if ( device_model_found != 0 ) + { + get_default("FEATURES", "DEVICE_MODEL", device_model); + if ( device_model != "" ) + { + device_model_found = 0; + } + } + if ( pae_found == 0) { file << "pae = " << on_off_string(pae) << endl; @@ -675,6 +692,11 @@ int XenDriver::deployment_description( file << "apic = " << on_off_string(apic) << endl; } + if ( device_model_found == 0) + { + file << "device_model = '" << device_model << "'" << endl; + } + attrs.clear(); } diff --git a/src/vmm_mad/exec/vmm_exec_xen4.conf b/src/vmm_mad/exec/vmm_exec_xen4.conf index 3b10fc961d..534612fa46 100644 --- a/src/vmm_mad/exec/vmm_exec_xen4.conf +++ b/src/vmm_mad/exec/vmm_exec_xen4.conf @@ -20,7 +20,7 @@ # - credit # - os [kernel,initrd,root,kernel_cmd,hvm] # - vcpu -# - features [acpi, pae, apic] +# - features [acpi, pae, apic, device_model] # - disk[driver] # - nic[model] # - raw From b2332e1250283158fdb49e8150f07733dcd31c76 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 6 Feb 2014 15:29:43 +0100 Subject: [PATCH 06/80] feature #2143: add machine type to KVM deployment file Based on a patch submited by Vladislav Gorbunov in http://dev.opennebula.org/issues/2143#note-2 --- src/vmm/LibVirtDriverKVM.cc | 16 +++++++++++++++- src/vmm_mad/exec/vmm_exec_kvm.conf | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index fb64704792..d404e8e8dd 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -46,6 +46,7 @@ int LibVirtDriver::deployment_description_kvm( string kernel_cmd = ""; string bootloader = ""; string arch = ""; + string machine = ""; vector boots; @@ -190,6 +191,7 @@ int LibVirtDriver::deployment_description_kvm( kernel_cmd = os->vector_value("KERNEL_CMD"); bootloader = os->vector_value("BOOTLOADER"); arch = os->vector_value("ARCH"); + machine = os->vector_value("MACHINE"); } } @@ -203,7 +205,19 @@ int LibVirtDriver::deployment_description_kvm( } } - file << "\t\thvm" << endl; + if ( machine.empty() ) + { + get_default("OS", "MACHINE", machine); + } + + file 
<< "\t\thvm" << endl; if ( kernel.empty() ) { diff --git a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index 07764bd559..c3c1961498 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -18,7 +18,7 @@ # (all domains will use these values as defaults). These values can # be overridden in each VM template. Valid atributes are: # - emulator -# - os [kernel,initrd,boot,root,kernel_cmd,arch] +# - os [kernel,initrd,boot,root,kernel_cmd,arch,machine] # - vcpu # - features [acpi, pae, apic] # - disk [driver, cache, io] From cbb1d32b7348ff7d9e40aff7540136f2a3b980cd Mon Sep 17 00:00:00 2001 From: Bill Campbell Date: Fri, 7 Feb 2014 10:30:08 +0100 Subject: [PATCH 07/80] Feature #2568: Support for RBD Format 2 images --- src/datastore_mad/remotes/ceph/ceph.conf | 7 +++- src/datastore_mad/remotes/ceph/clone | 11 +----- src/datastore_mad/remotes/ceph/cp | 50 +++++++++++++++--------- src/datastore_mad/remotes/ceph/mkfs | 14 ++----- src/datastore_mad/remotes/ceph/monitor | 17 +++----- src/datastore_mad/remotes/ceph/rm | 15 +++---- src/tm_mad/ceph/clone | 13 ++++-- src/tm_mad/ceph/delete | 5 +++ src/tm_mad/ceph/mvds | 8 ++++ 9 files changed, 74 insertions(+), 66 deletions(-) diff --git a/src/datastore_mad/remotes/ceph/ceph.conf b/src/datastore_mad/remotes/ceph/ceph.conf index ebd0123739..7af0151c81 100644 --- a/src/datastore_mad/remotes/ceph/ceph.conf +++ b/src/datastore_mad/remotes/ceph/ceph.conf @@ -15,10 +15,13 @@ #--------------------------------------------------------------------------- # # Default POOL_NAME -POOL_NAME=one +POOL_NAME=rbd + +# Default Ceph server host. Storage operations will be performed in this host. +HOST=localhost # Staging directory # A directory in the Ceph server host where image will be transferred to # temporarily during the create/mkfs processes. This directoy MUST exist, # have enough space and be writeable by 'oneadmin' -STAGING_DIR=/var/tmp +STAGING_DIR=/var/tmp/one diff --git a/src/datastore_mad/remotes/ceph/clone b/src/datastore_mad/remotes/ceph/clone index 315711fb1e..b32ac9f42b 100755 --- a/src/datastore_mad/remotes/ceph/clone +++ b/src/datastore_mad/remotes/ceph/clone @@ -45,7 +45,7 @@ unset i XPATH_ELEMENTS while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ - /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/IMAGE/PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/SIZE) @@ -53,18 +53,11 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ unset i BASE_PATH="${XPATH_ELEMENTS[i++]}" -BRIDGE_LIST="${XPATH_ELEMENTS[i++]}" +DST_HOST="${XPATH_ELEMENTS[i++]:-$HOST}" POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" SRC="${XPATH_ELEMENTS[i++]}" SIZE="${XPATH_ELEMENTS[i++]}" -DST_HOST=`get_destination_host $ID` - -if [ -z "$DST_HOST" ]; then - error_message "Datastore template missing 'BRIDGE_LIST' attribute." 
- exit -1 -fi - SAFE_DIRS="" IMAGE_NAME="one-${ID}" diff --git a/src/datastore_mad/remotes/ceph/cp b/src/datastore_mad/remotes/ceph/cp index 8a55e8f159..04b29fda9c 100755 --- a/src/datastore_mad/remotes/ceph/cp +++ b/src/datastore_mad/remotes/ceph/cp @@ -51,7 +51,7 @@ while IFS= read -r -d '' element; do done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \ - /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/STAGING_DIR \ /DS_DRIVER_ACTION_DATA/IMAGE/PATH \ @@ -66,7 +66,7 @@ unset i BASE_PATH="${XPATH_ELEMENTS[i++]}" RESTRICTED_DIRS="${XPATH_ELEMENTS[i++]}" SAFE_DIRS="${XPATH_ELEMENTS[i++]}" -BRIDGE_LIST="${XPATH_ELEMENTS[i++]}" +DST_HOST="${XPATH_ELEMENTS[i++]:-$HOST}" POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" STAGING_DIR="${XPATH_ELEMENTS[i++]:-$STAGING_DIR}" SRC="${XPATH_ELEMENTS[i++]}" @@ -76,13 +76,6 @@ SHA1="${XPATH_ELEMENTS[i++]}" NO_DECOMPRESS="${XPATH_ELEMENTS[i++]}" LIMIT_TRANSFER_BW="${XPATH_ELEMENTS[i++]}" -DST_HOST=`get_destination_host $ID` - -if [ -z "$DST_HOST" ]; then - error_message "Datastore template missing 'BRIDGE_LIST' attribute." - exit -1 -fi - set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" IMAGE_HASH=`generate_image_hash` @@ -118,18 +111,37 @@ esac exec_and_log "eval $DUMP | $SSH $DST_HOST $DD of=$TMP_DST bs=64k" \ "Error dumping $SRC to $DST_HOST:$TMP_DST" -REGISTER_CMD=$(cat </one---" RBD_SRC="${SRC}-${VM_ID}-${DISK_ID}" +RBD_SNAP="${VM_ID}-${DISK_ID}" #------------------------------------------------------------------------------- # Delete the device @@ -89,5 +90,9 @@ log "Deleting $DST_PATH" ssh_exec_and_log "$DST_HOST" "$RBD rm $RBD_SRC" \ "Error deleting $RBD_SRC in $DST_HOST" +ssh_exec_and_log "$DST_HOST" "$RBD snap unprotect $SRC@$RBD_SNAP" \ + "Error unprotecting snapshot $RBD_SNAP of image $SRC" +ssh_exec_and_log "$DST_HOST" "$RBD snap rm $SRC@$RBD_SNAP" \ + "Error removing snapshot $RBD_SNAP of image $SRC" exit 0 diff --git a/src/tm_mad/ceph/mvds b/src/tm_mad/ceph/mvds index df6b5f0b40..6d97e29322 100755 --- a/src/tm_mad/ceph/mvds +++ b/src/tm_mad/ceph/mvds @@ -69,6 +69,7 @@ PERSISTENT="${XPATH_ELEMENTS[j++]}" # non persistent, so the name will be "/one---" RBD_DST="${RBD_SRC}-${VM_ID}-${DISK_ID}" +RBD_SNAP="${VM_ID}-${DISK_ID}" #------------------------------------------------------------------------------- # Move the image back to the datastore @@ -76,7 +77,14 @@ RBD_DST="${RBD_SRC}-${VM_ID}-${DISK_ID}" log "Dumping $RBD_DST to $DST" +ssh_exec_and_log "$SRC_HOST" "$RBD flatten $RBD_DST" \ + "Error copying data from master image $RBD_SRC to $RBD_DST" +ssh_exec_and_log "$SRC_HOST" "$RBD snap unprotect $RBD_SRC@$RBD_SNAP" \ + "Error unprotecting snapshot $RBD_SNAP of image $RBD_SRC" +ssh_exec_and_log "$SRC_HOST" "$RBD snap rm $RBD_SRC@$RBD_SNAP" \ + "Error removing snapshot $RBD_SNAP of image $RBD_SRC" ssh_exec_and_log "$SRC_HOST" "$RBD rename $RBD_DST $DST" \ "Error saving $RBD_DST as $DST in $SRC_HOST" + exit 0 From ae55d2b0088dc30d83278f3ea9b78052f72214a5 Mon Sep 17 00:00:00 2001 From: Jaime Melis Date: Fri, 7 Feb 2014 10:35:04 +0100 Subject: [PATCH 08/80] Feature #2568: Refactor RBD2 format contribution by Bill Campbell: - Use BRIDGE_LIST instead of HOST - Minimize the number of SSH connections - Use default STAGING_DIR - Add possibility to choose the RBD_FORMAT 
- Add possibility to add "-O rbd" to the "rbd" command line args --- src/datastore_mad/remotes/ceph/ceph.conf | 15 ++++-- src/datastore_mad/remotes/ceph/clone | 11 ++++- src/datastore_mad/remotes/ceph/cp | 58 ++++++++++++------------ src/datastore_mad/remotes/ceph/mkfs | 13 ++++-- src/datastore_mad/remotes/ceph/monitor | 15 ++++-- src/datastore_mad/remotes/ceph/rm | 15 ++++-- src/tm_mad/ceph/clone | 19 ++++---- src/tm_mad/ceph/delete | 15 ++++-- src/tm_mad/ceph/mvds | 19 ++++---- 9 files changed, 111 insertions(+), 69 deletions(-) diff --git a/src/datastore_mad/remotes/ceph/ceph.conf b/src/datastore_mad/remotes/ceph/ceph.conf index 7af0151c81..54aff4729a 100644 --- a/src/datastore_mad/remotes/ceph/ceph.conf +++ b/src/datastore_mad/remotes/ceph/ceph.conf @@ -15,13 +15,18 @@ #--------------------------------------------------------------------------- # # Default POOL_NAME -POOL_NAME=rbd - -# Default Ceph server host. Storage operations will be performed in this host. -HOST=localhost +POOL_NAME=one # Staging directory # A directory in the Ceph server host where image will be transferred to # temporarily during the create/mkfs processes. This directoy MUST exist, # have enough space and be writeable by 'oneadmin' -STAGING_DIR=/var/tmp/one +STAGING_DIR=/var/tmp + +# Default RBD_FORMAT +RBD_FORMAT=2 + +# Extra arguments send to "qemu-img convert". Depending on the qemu-img version +# it using "-O rbd" can be either recommended or may cause segfaults. Uncomment +# the following line to add "-O rbd" to the qemu-img convert command +# QEMU_IMG_CONVERT_ARGS="-O rbd" diff --git a/src/datastore_mad/remotes/ceph/clone b/src/datastore_mad/remotes/ceph/clone index b32ac9f42b..315711fb1e 100755 --- a/src/datastore_mad/remotes/ceph/clone +++ b/src/datastore_mad/remotes/ceph/clone @@ -45,7 +45,7 @@ unset i XPATH_ELEMENTS while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ - /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/IMAGE/PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/SIZE) @@ -53,11 +53,18 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ unset i BASE_PATH="${XPATH_ELEMENTS[i++]}" -DST_HOST="${XPATH_ELEMENTS[i++]:-$HOST}" +BRIDGE_LIST="${XPATH_ELEMENTS[i++]}" POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" SRC="${XPATH_ELEMENTS[i++]}" SIZE="${XPATH_ELEMENTS[i++]}" +DST_HOST=`get_destination_host $ID` + +if [ -z "$DST_HOST" ]; then + error_message "Datastore template missing 'BRIDGE_LIST' attribute." 
+ exit -1 +fi + SAFE_DIRS="" IMAGE_NAME="one-${ID}" diff --git a/src/datastore_mad/remotes/ceph/cp b/src/datastore_mad/remotes/ceph/cp index 04b29fda9c..505ce2389d 100755 --- a/src/datastore_mad/remotes/ceph/cp +++ b/src/datastore_mad/remotes/ceph/cp @@ -51,9 +51,10 @@ while IFS= read -r -d '' element; do done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \ - /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/STAGING_DIR \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RBD_FORMAT \ /DS_DRIVER_ACTION_DATA/IMAGE/PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/SIZE \ /DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5 \ @@ -66,9 +67,10 @@ unset i BASE_PATH="${XPATH_ELEMENTS[i++]}" RESTRICTED_DIRS="${XPATH_ELEMENTS[i++]}" SAFE_DIRS="${XPATH_ELEMENTS[i++]}" -DST_HOST="${XPATH_ELEMENTS[i++]:-$HOST}" +BRIDGE_LIST="${XPATH_ELEMENTS[i++]}" POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" STAGING_DIR="${XPATH_ELEMENTS[i++]:-$STAGING_DIR}" +RBD_FORMAT="${XPATH_ELEMENTS[i++]:-$RBD_FORMAT}" SRC="${XPATH_ELEMENTS[i++]}" SIZE="${XPATH_ELEMENTS[i++]}" MD5="${XPATH_ELEMENTS[i++]}" @@ -76,6 +78,13 @@ SHA1="${XPATH_ELEMENTS[i++]}" NO_DECOMPRESS="${XPATH_ELEMENTS[i++]}" LIMIT_TRANSFER_BW="${XPATH_ELEMENTS[i++]}" +DST_HOST=`get_destination_host $ID` + +if [ -z "$DST_HOST" ]; then + error_message "Datastore template missing 'BRIDGE_LIST' attribute." + exit -1 +fi + set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" IMAGE_HASH=`generate_image_hash` @@ -111,37 +120,28 @@ esac exec_and_log "eval $DUMP | $SSH $DST_HOST $DD of=$TMP_DST bs=64k" \ "Error dumping $SRC to $DST_HOST:$TMP_DST" -# Determine if source file is a TAR archive and process accordingly -TARFILE=$(ssh $DST_HOST file -b --mime-type $TMP_DST) +REGISTER_CMD=$(cat < Date: Fri, 7 Feb 2014 11:41:57 +0100 Subject: [PATCH 09/80] Reapply fixes for Bug #2110 and Bug #2271 --- src/datastore_mad/remotes/ceph/mkfs | 1 + src/datastore_mad/remotes/ceph/monitor | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/datastore_mad/remotes/ceph/mkfs b/src/datastore_mad/remotes/ceph/mkfs index b242c51561..a81f4a6c68 100755 --- a/src/datastore_mad/remotes/ceph/mkfs +++ b/src/datastore_mad/remotes/ceph/mkfs @@ -94,6 +94,7 @@ MKFS_CMD=`mkfs_command $TMP_DST $FSTYPE $SIZE` REGISTER_CMD=$(cat < Date: Fri, 7 Feb 2014 12:09:05 +0100 Subject: [PATCH 10/80] Feature #2568: RBD format 1 as default. Handle properly both formats --- src/datastore_mad/remotes/ceph/ceph.conf | 7 +++++-- src/tm_mad/ceph/clone | 10 +++++++--- src/tm_mad/ceph/delete | 7 +++++-- src/tm_mad/ceph/mvds | 9 ++++++--- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/datastore_mad/remotes/ceph/ceph.conf b/src/datastore_mad/remotes/ceph/ceph.conf index 54aff4729a..b3885c2bd5 100644 --- a/src/datastore_mad/remotes/ceph/ceph.conf +++ b/src/datastore_mad/remotes/ceph/ceph.conf @@ -23,8 +23,11 @@ POOL_NAME=one # have enough space and be writeable by 'oneadmin' STAGING_DIR=/var/tmp -# Default RBD_FORMAT -RBD_FORMAT=2 +# Default RBD_FORMAT. By default RBD format 1 will be used. Uncomment the +# following options to enable support for RBD 2. 
This value affects all the ceph +# datastores, however it can be enabled per ceph datastore using the same +# option in the datastore template +# RBD_FORMAT=2 # Extra arguments send to "qemu-img convert". Depending on the qemu-img version # it using "-O rbd" can be either recommended or may cause segfaults. Uncomment diff --git a/src/tm_mad/ceph/clone b/src/tm_mad/ceph/clone index bbe1386ae0..f7632c9af2 100755 --- a/src/tm_mad/ceph/clone +++ b/src/tm_mad/ceph/clone @@ -56,9 +56,13 @@ RBD_SNAP="${VM_ID}-${DISK_ID}" CLONE_CMD=$(cat < Date: Fri, 7 Feb 2014 12:14:40 +0100 Subject: [PATCH 11/80] Feature #2556: Create image without filesystem on ceph datastore --- src/datastore_mad/remotes/ceph/mkfs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/datastore_mad/remotes/ceph/mkfs b/src/datastore_mad/remotes/ceph/mkfs index a81f4a6c68..216a9677cd 100755 --- a/src/datastore_mad/remotes/ceph/mkfs +++ b/src/datastore_mad/remotes/ceph/mkfs @@ -88,6 +88,9 @@ if [ "$FSTYPE" = "save_as" ]; then exit 0 fi +if [ "$FSTYPE" = "raw" ]; then + REGISTER_CMD="$QEMU_IMG create rbd:$RBD_SOURCE ${SIZE}M" +else # ------------ Create the image in the repository ------------ MKFS_CMD=`mkfs_command $TMP_DST $FSTYPE $SIZE` @@ -107,6 +110,7 @@ REGISTER_CMD=$(cat < Date: Fri, 7 Feb 2014 12:17:00 +0100 Subject: [PATCH 12/80] Feature #2556: Refactor Vladislav Gorbunov's contribution --- src/datastore_mad/remotes/ceph/mkfs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/datastore_mad/remotes/ceph/mkfs b/src/datastore_mad/remotes/ceph/mkfs index 216a9677cd..4556884ca7 100755 --- a/src/datastore_mad/remotes/ceph/mkfs +++ b/src/datastore_mad/remotes/ceph/mkfs @@ -88,9 +88,6 @@ if [ "$FSTYPE" = "save_as" ]; then exit 0 fi -if [ "$FSTYPE" = "raw" ]; then - REGISTER_CMD="$QEMU_IMG create rbd:$RBD_SOURCE ${SIZE}M" -else # ------------ Create the image in the repository ------------ MKFS_CMD=`mkfs_command $TMP_DST $FSTYPE $SIZE` @@ -99,18 +96,21 @@ REGISTER_CMD=$(cat < Date: Fri, 7 Feb 2014 12:39:48 +0100 Subject: [PATCH 13/80] Feature #2568: Fetch RBD format attribute from the VM in the TM scripts --- share/etc/oned.conf | 1 + src/tm_mad/ceph/clone | 15 +++++++++++++++ src/tm_mad/ceph/delete | 4 +++- src/tm_mad/ceph/mvds | 4 +++- 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/share/etc/oned.conf b/share/etc/oned.conf index 18eb97706f..28abcbff7f 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -734,5 +734,6 @@ IMAGE_RESTRICTED_ATTR = "SOURCE" INHERIT_DATASTORE_ATTR = "CEPH_HOST" INHERIT_DATASTORE_ATTR = "CEPH_SECRET" INHERIT_DATASTORE_ATTR = "CEPH_USER" +INHERIT_DATASTORE_ATTR = "RBD_FORMAT" INHERIT_VNET_ATTR = "VLAN_TAGGED_ID" diff --git a/src/tm_mad/ceph/clone b/src/tm_mad/ceph/clone index f7632c9af2..4b819a50fd 100755 --- a/src/tm_mad/ceph/clone +++ b/src/tm_mad/ceph/clone @@ -49,6 +49,21 @@ DISK_ID=$(echo $DST|awk -F. 
'{print $NF}') RBD_DST="${SRC_PATH}-${VM_ID}-${DISK_ID}" RBD_SNAP="${VM_ID}-${DISK_ID}" +#------------------------------------------------------------------------------- +# Get Image information +#------------------------------------------------------------------------------- + +XPATH="${DRIVER_PATH}/../../datastore/xpath.rb --stdin" + +unset i j XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <(onevm show -x $VM_ID| $XPATH \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/RBD_FORMAT) + +RBD_FORMAT="${XPATH_ELEMENTS[j++]}" + #------------------------------------------------------------------------------- # Clone the image #------------------------------------------------------------------------------- diff --git a/src/tm_mad/ceph/delete b/src/tm_mad/ceph/delete index 4c2833fe5b..f8cbf8333c 100755 --- a/src/tm_mad/ceph/delete +++ b/src/tm_mad/ceph/delete @@ -67,10 +67,12 @@ while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ - /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/PERSISTENT) + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/PERSISTENT \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/RBD_FORMAT) SRC="${XPATH_ELEMENTS[j++]}" PERSISTENT="${XPATH_ELEMENTS[j++]}" +RBD_FORMAT="${XPATH_ELEMENTS[j++]}" # Exit if persistent [ -n "$PERSISTENT" ] && exit 0 diff --git a/src/tm_mad/ceph/mvds b/src/tm_mad/ceph/mvds index 7ed60d6b45..b2648df00b 100755 --- a/src/tm_mad/ceph/mvds +++ b/src/tm_mad/ceph/mvds @@ -59,10 +59,12 @@ while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ - /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/PERSISTENT) + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/PERSISTENT \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/RBD_FORMAT) RBD_SRC="${XPATH_ELEMENTS[j++]}" PERSISTENT="${XPATH_ELEMENTS[j++]}" +RBD_FORMAT="${XPATH_ELEMENTS[j++]}" # Exit if persistent [ -n "$PERSISTENT" ] && exit 0 From 965ec46234729a8b6a0fc4becc50c79149dc937b Mon Sep 17 00:00:00 2001 From: Michael Kutzner Date: Fri, 7 Feb 2014 16:37:40 +0100 Subject: [PATCH 14/80] feature #2567: add KVM hyperv feature Signed-off-by: Javi Fontan --- src/sunstone/public/js/plugins/templates-tab.js | 15 +++++++++++++++ src/vmm/LibVirtDriverKVM.cc | 15 ++++++++++++++- src/vmm_mad/exec/vmm_exec_kvm.conf | 2 +- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/sunstone/public/js/plugins/templates-tab.js b/src/sunstone/public/js/plugins/templates-tab.js index cfd8296740..b42fe88b5b 100644 --- a/src/sunstone/public/js/plugins/templates-tab.js +++ b/src/sunstone/public/js/plugins/templates-tab.js @@ -2655,6 +2655,21 @@ function add_osTab(dialog) { '
'+tr("Add support in the VM for Physical Address Extension (PAE)")+'
'+ ''+ ''+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + '
'+tr("Add support in the VM for hyper-v features (HYPERV)")+'
'+ + '
'+ + '
'+ ''+ '
'+ '
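As a point of reference for the KVM driver changes in the LibVirtDriverKVM.cc diff below: when a VM template (or vmm_exec_kvm.conf) enables HYPERV in FEATURES, the driver writes a hyperv block inside the domain's features element, filled from the configured Hyper-V enlightenment options. A minimal sketch of the resulting libvirt XML, assuming the commonly used relaxed/vapic/spinlocks enlightenments as the configured options (illustrative only, not taken verbatim from the patch):

    <!-- sketch of a generated <features> section with ACPI, APIC and HYPERV enabled -->
    <features>
      <acpi/>
      <apic/>
      <hyperv>
        <!-- the sub-elements below are assumed example options -->
        <relaxed state='on'/>
        <vapic state='on'/>
        <spinlocks state='on' retries='4096'/>
      </hyperv>
    </features>
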
'+ diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index d404e8e8dd..c8083b22f8 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -98,10 +98,12 @@ int LibVirtDriver::deployment_description_kvm( bool pae = false; bool acpi = false; bool apic = false; + bool hyperv = false; int pae_found = -1; int acpi_found = -1; int apic_found = -1; + int hyperv_found = -1; const VectorAttribute * raw; string default_raw; @@ -737,6 +739,7 @@ int LibVirtDriver::deployment_description_kvm( pae_found = features->vector_value("PAE", pae); acpi_found = features->vector_value("ACPI", acpi); apic_found = features->vector_value("APIC", apic); + hyperv_found = features->vector_value("HYPERV", hyperv); } } @@ -755,7 +758,12 @@ int LibVirtDriver::deployment_description_kvm( get_default("FEATURES", "APIC", apic); } - if( acpi || pae || apic ) + if ( hyperv_found != 0 ) + { + get_default("FEATURES", "HYPERV", hyperv); + } + + if( acpi || pae || apic || hyperv ) { file << "\t" << endl; @@ -774,6 +782,11 @@ int LibVirtDriver::deployment_description_kvm( file << "\t\t" << endl; } + if ( hyperv ) + { + file << "\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t" << endl; + } + file << "\t" << endl; } diff --git a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index c3c1961498..dc762a2e9c 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -31,7 +31,7 @@ #VCPU = 1 OS = [ boot = "hd", arch = "i686" ] -FEATURES = [ PAE = "no", ACPI = "yes", APIC = "no" ] +FEATURES = [ PAE = "no", ACPI = "yes", APIC = "no", HYPERV="no" ] DISK = [ driver = "raw" , cache = "none"] From 1c5111e0ed94c89f678c3671b554b2bea41294b4 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 7 Feb 2014 17:26:43 +0100 Subject: [PATCH 15/80] feature #2567: move hyperv options to kvm config file --- src/vmm/LibVirtDriverKVM.cc | 30 ++++++++++++++++++------------ src/vmm_mad/exec/vmm_exec_kvm.conf | 7 +++++-- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index c8083b22f8..6f5c476e92 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -95,15 +95,17 @@ int LibVirtDriver::deployment_description_kvm( const VectorAttribute * features; - bool pae = false; - bool acpi = false; - bool apic = false; + bool pae = false; + bool acpi = false; + bool apic = false; bool hyperv = false; - int pae_found = -1; - int acpi_found = -1; - int apic_found = -1; - int hyperv_found = -1; + int pae_found = -1; + int acpi_found = -1; + int apic_found = -1; + int hyperv_found = -1; + + string hyperv_options = ""; const VectorAttribute * raw; string default_raw; @@ -736,10 +738,10 @@ int LibVirtDriver::deployment_description_kvm( if ( features != 0 ) { - pae_found = features->vector_value("PAE", pae); - acpi_found = features->vector_value("ACPI", acpi); - apic_found = features->vector_value("APIC", apic); - hyperv_found = features->vector_value("HYPERV", hyperv); + pae_found = features->vector_value("PAE", pae); + acpi_found = features->vector_value("ACPI", acpi); + apic_found = features->vector_value("APIC", apic); + hyperv_found= features->vector_value("HYPERV", hyperv); } } @@ -784,7 +786,11 @@ int LibVirtDriver::deployment_description_kvm( if ( hyperv ) { - file << "\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t" << endl; + get_default("HYPERV_OPTIONS", hyperv_options); + + file << "\t\t" << endl; + file << hyperv_options << endl; + file << "\t\t" << endl; } file << "\t" << endl; diff --git 
a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index dc762a2e9c..e78ab9957e 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -20,10 +20,11 @@ # - emulator # - os [kernel,initrd,boot,root,kernel_cmd,arch,machine] # - vcpu -# - features [acpi, pae, apic] +# - features [acpi, pae, apic, hyperv] # - disk [driver, cache, io] # - nic [filter, model] # - raw +# - hyperv_options: options used for FEATURES = [ HYPERV = yes ] # NOTE: raw attribute value is appended to that on the VM template #EMULATOR = /usr/libexec/qemu-kvm @@ -31,9 +32,11 @@ #VCPU = 1 OS = [ boot = "hd", arch = "i686" ] -FEATURES = [ PAE = "no", ACPI = "yes", APIC = "no", HYPERV="no" ] +FEATURES = [ PAE = "no", ACPI = "yes", APIC = "no", HYPERV = "no" ] DISK = [ driver = "raw" , cache = "none"] #NIC = [ filter = "clean-traffic", model="virtio" ] #RAW = [ type = "kvm", data = "" ] + +HYPERV_OPTIONS="" From f9c6ac386e63543e6987c57e1bcf431c24544c30 Mon Sep 17 00:00:00 2001 From: Vladislav Gorbunov Date: Fri, 7 Feb 2014 18:58:11 +0100 Subject: [PATCH 16/80] feature #2547: add localtime feature to kvm Signed-off-by: Javi Fontan --- src/vmm/LibVirtDriverKVM.cc | 31 +++++++++++++++++++++--------- src/vmm_mad/exec/vmm_exec_kvm.conf | 2 +- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index 6f5c476e92..587462147c 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -95,15 +95,17 @@ int LibVirtDriver::deployment_description_kvm( const VectorAttribute * features; - bool pae = false; - bool acpi = false; - bool apic = false; - bool hyperv = false; + bool pae = false; + bool acpi = false; + bool apic = false; + bool hyperv = false; + bool localtime = false; int pae_found = -1; int acpi_found = -1; int apic_found = -1; int hyperv_found = -1; + int localtime_found = -1; string hyperv_options = ""; @@ -738,10 +740,11 @@ int LibVirtDriver::deployment_description_kvm( if ( features != 0 ) { - pae_found = features->vector_value("PAE", pae); - acpi_found = features->vector_value("ACPI", acpi); - apic_found = features->vector_value("APIC", apic); - hyperv_found= features->vector_value("HYPERV", hyperv); + pae_found = features->vector_value("PAE", pae); + acpi_found = features->vector_value("ACPI", acpi); + apic_found = features->vector_value("APIC", apic); + hyperv_found = features->vector_value("HYPERV", hyperv); + localtime_found = features->vector_value("LOCALTIME", localtime); } } @@ -765,7 +768,12 @@ int LibVirtDriver::deployment_description_kvm( get_default("FEATURES", "HYPERV", hyperv); } - if( acpi || pae || apic || hyperv ) + if ( localtime_found != 0 ) + { + get_default("FEATURES", "LOCALTIME", localtime); + } + + if ( acpi || pae || apic || hyperv ) { file << "\t" << endl; @@ -796,6 +804,11 @@ int LibVirtDriver::deployment_description_kvm( file << "\t" << endl; } + if ( localtime ) + { + file << "\t" << endl; + } + attrs.clear(); // ------------------------------------------------------------------------ diff --git a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index e78ab9957e..1a6f4e9e3c 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -20,7 +20,7 @@ # - emulator # - os [kernel,initrd,boot,root,kernel_cmd,arch,machine] # - vcpu -# - features [acpi, pae, apic, hyperv] +# - features [acpi, pae, apic, hyperv, localtime] # - disk [driver, cache, io] # - nic [filter, model] # - raw From 
e95fa8fb7eb2b7e1d5e49da146ecb5eb1589f55b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 10 Feb 2014 16:56:04 +0100 Subject: [PATCH 17/80] feature #2485: add extra spice options to KVM --- src/vmm/LibVirtDriverKVM.cc | 19 +++++++++++++++---- src/vmm_mad/exec/vmm_exec_kvm.conf | 13 +++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index 587462147c..d3821d180d 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -86,10 +86,11 @@ int LibVirtDriver::deployment_description_kvm( const VectorAttribute * graphics; - string listen = ""; - string port = ""; - string passwd = ""; - string keymap = ""; + string listen = ""; + string port = ""; + string passwd = ""; + string keymap = ""; + string spice_options = ""; const VectorAttribute * input; @@ -686,6 +687,16 @@ int LibVirtDriver::deployment_description_kvm( } file << "/>" << endl; + + if ( type == "spice" ) + { + get_default("SPICE_OPTIONS", spice_options); + + if ( spice_options != "" ) + { + file << "\t\t" << spice_options << endl; + } + } } else { diff --git a/src/vmm_mad/exec/vmm_exec_kvm.conf b/src/vmm_mad/exec/vmm_exec_kvm.conf index 1a6f4e9e3c..8e068e40f7 100644 --- a/src/vmm_mad/exec/vmm_exec_kvm.conf +++ b/src/vmm_mad/exec/vmm_exec_kvm.conf @@ -40,3 +40,16 @@ DISK = [ driver = "raw" , cache = "none"] #RAW = [ type = "kvm", data = "" ] HYPERV_OPTIONS="" + +SPICE_OPTIONS=" + + + + + + + + " + From 3004e3c0554430e079dbc5e0a678aacb968c47ec Mon Sep 17 00:00:00 2001 From: Jaime Melis Date: Mon, 17 Feb 2014 15:47:27 +0100 Subject: [PATCH 18/80] Bug #2645: A VM disk detach leaves Images to be saved-as in LOCKED state --- include/LifeCycleManager.h | 2 +- src/lcm/LifeCycleManager.cc | 2 +- src/lcm/LifeCycleStates.cc | 15 +++++++++++++-- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/include/LifeCycleManager.h b/include/LifeCycleManager.h index d71eeab413..40999420de 100644 --- a/include/LifeCycleManager.h +++ b/include/LifeCycleManager.h @@ -207,7 +207,7 @@ private: void attach_success_action(int vid); - void attach_failure_action(int vid); + void attach_failure_action(int vid, bool release_save_as); void detach_success_action(int vid); diff --git a/src/lcm/LifeCycleManager.cc b/src/lcm/LifeCycleManager.cc index 1e4137ad4e..62f79d37b1 100644 --- a/src/lcm/LifeCycleManager.cc +++ b/src/lcm/LifeCycleManager.cc @@ -356,7 +356,7 @@ void LifeCycleManager::do_action(const string &action, void * arg) } else if (action == "ATTACH_FAILURE") { - attach_failure_action(vid); + attach_failure_action(vid, false); } else if (action == "DETACH_SUCCESS") { diff --git a/src/lcm/LifeCycleStates.cc b/src/lcm/LifeCycleStates.cc index a4a33fc995..49a296fad0 100644 --- a/src/lcm/LifeCycleStates.cc +++ b/src/lcm/LifeCycleStates.cc @@ -1340,7 +1340,7 @@ void LifeCycleManager::attach_success_action(int vid) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -void LifeCycleManager::attach_failure_action(int vid) +void LifeCycleManager::attach_failure_action(int vid, bool release_save_as) { VirtualMachine * vm; VectorAttribute * disk; @@ -1385,6 +1385,17 @@ void LifeCycleManager::attach_failure_action(int vid) Quotas::quota_del(Quotas::IMAGE, uid, gid, &tmpl); imagem->release_image(oid, image_id, false); + + // Release non-persistent images in the detach event + if (release_save_as) + { + int save_as_id; + + if ( 
disk->vector_value("SAVE_AS", save_as_id) == 0 ) + { + imagem->release_image(oid, save_as_id, false); + } + } } else // Volatile disk { @@ -1408,7 +1419,7 @@ void LifeCycleManager::attach_failure_action(int vid) void LifeCycleManager::detach_success_action(int vid) { - attach_failure_action(vid); + attach_failure_action(vid, true); } /* -------------------------------------------------------------------------- */ From 859a85905d5c8950562d6dcb0c9814fd7c645f10 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 17 Feb 2014 16:25:46 +0100 Subject: [PATCH 19/80] feature #2547: add localtime feature to Xen --- src/vmm/XenDriver.cc | 28 +++++++++++++++++++++------- src/vmm_mad/exec/vmm_exec_xen4.conf | 2 +- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/src/vmm/XenDriver.cc b/src/vmm/XenDriver.cc index 35ff90b7ed..3d1828dcc7 100644 --- a/src/vmm/XenDriver.cc +++ b/src/vmm/XenDriver.cc @@ -88,15 +88,17 @@ int XenDriver::deployment_description( const VectorAttribute * features; - bool pae = false; - bool acpi = false; - bool apic = false; + bool pae = false; + bool acpi = false; + bool apic = false; string device_model = ""; + bool localtime = false; - int pae_found = -1; - int acpi_found = -1; - int apic_found = -1; - int device_model_found = -1; + int pae_found = -1; + int acpi_found = -1; + int apic_found = -1; + int device_model_found = -1; + int localtime_found = -1; const VectorAttribute * raw; string data; @@ -644,6 +646,8 @@ int XenDriver::deployment_description( pae_found = features->vector_value("PAE", pae); acpi_found = features->vector_value("ACPI", acpi); apic_found = features->vector_value("APIC", apic); + localtime_found = + features->vector_value("LOCALTIME", localtime); device_model = features->vector_value("DEVICE_MODEL"); if ( device_model != "" ) @@ -677,6 +681,11 @@ int XenDriver::deployment_description( } } + if ( localtime_found != 0 ) + { + get_default("FEATURES", "LOCALTIME", localtime); + } + if ( pae_found == 0) { file << "pae = " << on_off_string(pae) << endl; @@ -697,6 +706,11 @@ int XenDriver::deployment_description( file << "device_model = '" << device_model << "'" << endl; } + if ( localtime ) + { + file << "localtime = 'yes'" << endl; + } + attrs.clear(); } diff --git a/src/vmm_mad/exec/vmm_exec_xen4.conf b/src/vmm_mad/exec/vmm_exec_xen4.conf index 534612fa46..eabf2dbc30 100644 --- a/src/vmm_mad/exec/vmm_exec_xen4.conf +++ b/src/vmm_mad/exec/vmm_exec_xen4.conf @@ -20,7 +20,7 @@ # - credit # - os [kernel,initrd,root,kernel_cmd,hvm] # - vcpu -# - features [acpi, pae, apic, device_model] +# - features [acpi, pae, apic, device_model, localtime] # - disk[driver] # - nic[model] # - raw From 293e97b83171d1fbed30bd24dd12af6b04a5816d Mon Sep 17 00:00:00 2001 From: Lykle Voort Date: Mon, 17 Feb 2014 16:57:44 +0100 Subject: [PATCH 20/80] Feature #2732: added option http_proxy to oca ruby client --- src/oca/ruby/opennebula/client.rb | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/oca/ruby/opennebula/client.rb b/src/oca/ruby/opennebula/client.rb index 72e19c0274..d8e39f90f9 100644 --- a/src/oca/ruby/opennebula/client.rb +++ b/src/oca/ruby/opennebula/client.rb @@ -101,6 +101,8 @@ module OpenNebula # @param [Hash] options # @option params [Integer] :timeout connection timeout in seconds, # defaults to 30 + # @option params [String] :http_proxy HTTP proxy string used for + # connecting to the endpoint; defaults to no proxy # # @return [OpenNebula::Client] def initialize(secret=nil, endpoint=nil, options={}) @@ -130,7 +132,10 @@ module 
OpenNebula timeout=nil timeout=options[:timeout] if options[:timeout] - @server = XMLRPC::Client.new2(@one_endpoint, nil, timeout) + http_proxy=nil + http_proxy=options[:http_proxy] if options[:http_proxy] + + @server = XMLRPC::Client.new2(@one_endpoint, http_proxy, timeout) if defined?(OxStreamParser) @server.set_parser(OxStreamParser.new) From af0f1dda7da1f781dc189ce349d1fa42569de3e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Thu, 30 Jan 2014 19:35:09 +0100 Subject: [PATCH 21/80] Add migrator to 4.4.1 (cherry picked from commit b77d2b351e41767f72f5394bea7e04a0cdd5d30f) --- install.sh | 1 + src/onedb/4.4.0_to_4.4.1.rb | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 src/onedb/4.4.0_to_4.4.1.rb diff --git a/install.sh b/install.sh index 978cb48ca7..4d8adbf9f7 100755 --- a/install.sh +++ b/install.sh @@ -1069,6 +1069,7 @@ ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \ src/onedb/4.3.80_to_4.3.85.rb \ src/onedb/4.3.85_to_4.3.90.rb \ src/onedb/4.3.90_to_4.4.0.rb \ + src/onedb/4.4.0_to_4.4.1.rb \ src/onedb/fsck.rb \ src/onedb/import_slave.rb \ src/onedb/onedb.rb \ diff --git a/src/onedb/4.4.0_to_4.4.1.rb b/src/onedb/4.4.0_to_4.4.1.rb new file mode 100644 index 0000000000..781169753e --- /dev/null +++ b/src/onedb/4.4.0_to_4.4.1.rb @@ -0,0 +1,29 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +module Migrator + def db_version + "4.4.1" + end + + def one_version + "OpenNebula 4.4.1" + end + + def up + return true + end +end From 358a59d3a8f9035c60a9f07887fce4ffc812fdb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Mon, 17 Feb 2014 15:28:02 +0100 Subject: [PATCH 22/80] Feature #2586: Optimize onedb upgrade (cherry picked from commit 57099e26426ae63756f7c8cae91de7fe29ad1608) --- src/onedb/3.8.0_to_3.8.1.rb | 101 +++--- src/onedb/3.8.5_to_3.9.80.rb | 570 +++++++++++++++++++--------------- src/onedb/3.9.80_to_3.9.90.rb | 78 +++-- src/onedb/4.0.1_to_4.1.80.rb | 71 +++-- src/onedb/4.2.0_to_4.3.80.rb | 331 +++++++++++--------- src/onedb/onedb.rb | 18 ++ src/onedb/onedb_backend.rb | 14 + 7 files changed, 666 insertions(+), 517 deletions(-) diff --git a/src/onedb/3.8.0_to_3.8.1.rb b/src/onedb/3.8.0_to_3.8.1.rb index 293c537a60..371b47559d 100644 --- a/src/onedb/3.8.0_to_3.8.1.rb +++ b/src/onedb/3.8.0_to_3.8.1.rb @@ -15,6 +15,7 @@ #--------------------------------------------------------------------------- # require 'set' +require 'nokogiri' require "rexml/document" include REXML @@ -29,6 +30,8 @@ module Migrator def up + init_log_time() + ######################################################################## # Bug : Add VM IDs Collection to Hosts & Images ######################################################################## @@ -49,17 +52,17 @@ module Migrator counters[:image][row[:oid]] = Set.new end + log_time() # Aggregate information of the RUNNING vms @db.fetch("SELECT oid,body FROM vm_pool WHERE state<>6") do |row| - vm_doc = Document.new(row[:body]) - - state = vm_doc.root.get_text('STATE').to_s.to_i - lcm_state = vm_doc.root.get_text('LCM_STATE').to_s.to_i + vm_doc = Nokogiri::XML(row[:body]) + state = vm_doc.root.at_xpath('STATE').text.to_i + lcm_state = vm_doc.root.at_xpath('LCM_STATE').text.to_i # Images used by this VM - vm_doc.root.each_element("TEMPLATE/DISK/IMAGE_ID") do |e| + vm_doc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each do |e| img_id = e.text.to_i if counters[:image][img_id].nil? 
@@ -78,7 +81,7 @@ module Migrator # Get hostid hid = -1 - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HID") { |e| + vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HID").each { |e| hid = e.text.to_i } @@ -89,6 +92,8 @@ module Migrator end end + log_time() + ######################################################################## # Hosts # @@ -103,38 +108,41 @@ module Migrator "UNIQUE(name));" # Calculate the host's xml and write them to host_pool_new - @db[:host_pool].each do |row| - host_doc = Document.new(row[:body]) + @db.transaction do + @db[:host_pool].each do |row| + host_doc = Document.new(row[:body]) - hid = row[:oid] + hid = row[:oid] - rvms = counters[:host][hid][:rvms].size + rvms = counters[:host][hid][:rvms].size - # rewrite running_vms - host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| - if e.text != rvms.to_s - warn("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms + # rewrite running_vms + host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| + if e.text != rvms.to_s + warn("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } + + # re-do list of VM IDs + vms_new_elem = host_doc.root.add_element("VMS") + + counters[:host][hid][:rvms].each do |id| + vms_new_elem.add_element("ID").text = id.to_s end - } - # re-do list of VM IDs - vms_new_elem = host_doc.root.add_element("VMS") + row[:body] = host_doc.to_s - counters[:host][hid][:rvms].each do |id| - vms_new_elem.add_element("ID").text = id.to_s + # commit + @db[:host_pool_new].insert(row) end - - row[:body] = host_doc.to_s - - # commit - @db[:host_pool_new].insert(row) end # Rename table @db.run("DROP TABLE host_pool") @db.run("ALTER TABLE host_pool_new RENAME TO host_pool") + log_time() ######################################################################## # Image @@ -146,39 +154,42 @@ module Migrator # Create a new empty table where we will store the new calculated values @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" - # Calculate the host's xml and write them to host_pool_new - @db[:image_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:image_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - rvms = counters[:image][oid].size + rvms = counters[:image][oid].size - # rewrite running_vms - doc.root.each_element("RUNNING_VMS") {|e| - if e.text != rvms.to_s - warn("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms + # rewrite running_vms + doc.root.each_element("RUNNING_VMS") {|e| + if e.text != rvms.to_s + warn("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } + + # re-do list of VM IDs + vms_new_elem = doc.root.add_element("VMS") + + counters[:image][oid].each do |id| + vms_new_elem.add_element("ID").text = id.to_s end - } - # re-do list of VM IDs - vms_new_elem = doc.root.add_element("VMS") + row[:body] = doc.to_s - counters[:image][oid].each do |id| - vms_new_elem.add_element("ID").text = id.to_s + # commit + @db[:image_pool_new].insert(row) end - - row[:body] = doc.to_s - - # commit - @db[:image_pool_new].insert(row) end # Rename table @db.run("DROP TABLE image_pool") @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") + log_time() + return true end end diff --git a/src/onedb/3.8.5_to_3.9.80.rb b/src/onedb/3.8.5_to_3.9.80.rb index aa61860e88..6cb2b5ca8b 100644 --- 
a/src/onedb/3.8.5_to_3.9.80.rb +++ b/src/onedb/3.8.5_to_3.9.80.rb @@ -18,6 +18,8 @@ require 'set' require "rexml/document" include REXML +require 'nokogiri' + class String def red colorize(31) @@ -41,6 +43,8 @@ module Migrator def up + init_log_time() + ######################################################################## # Add Cloning Image ID collection to Images ######################################################################## @@ -71,6 +75,8 @@ module Migrator end end + log_time() + ######################################################################## # Image # @@ -80,38 +86,42 @@ module Migrator @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" - @db[:image_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:image_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - n_cloning_ops = counters[:image][oid][:clones].size + n_cloning_ops = counters[:image][oid][:clones].size - # Rewrite number of clones - doc.root.each_element("CLONING_OPS") { |e| - if e.text != n_cloning_ops.to_s - warn("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") - e.text = n_cloning_ops + # Rewrite number of clones + doc.root.each_element("CLONING_OPS") { |e| + if e.text != n_cloning_ops.to_s + warn("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") + e.text = n_cloning_ops + end + } + + # re-do list of Images cloning this one + clones_new_elem = doc.root.add_element("CLONES") + + counters[:image][oid][:clones].each do |id| + clones_new_elem.add_element("ID").text = id.to_s end - } - # re-do list of Images cloning this one - clones_new_elem = doc.root.add_element("CLONES") + row[:body] = doc.to_s - counters[:image][oid][:clones].each do |id| - clones_new_elem.add_element("ID").text = id.to_s + # commit + @db[:image_pool_new].insert(row) end - - row[:body] = doc.to_s - - # commit - @db[:image_pool_new].insert(row) end # Rename table @db.run("DROP TABLE image_pool") @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") + log_time() + ######################################################################## # Feature #1565: New cid column in host, ds and vnet tables ######################################################################## @@ -119,27 +129,31 @@ module Migrator @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_host_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_host_pool") do |row| + doc = Document.new(row[:body]) - cluster_id = doc.root.get_text('CLUSTER_ID').to_s + cluster_id = doc.root.get_text('CLUSTER_ID').to_s - @db[:host_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :state => row[:state], - :last_mon_time => row[:last_mon_time], - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => cluster_id) + @db[:host_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :state => row[:state], + :last_mon_time => row[:last_mon_time], + :uid => row[:uid], + :gid => row[:gid], + 
:owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => cluster_id) + end end @db.run "DROP TABLE old_host_pool;" + log_time() + ######################################################################## # Feature #1565: New cid column # Feature #471: IPv6 addresses @@ -148,28 +162,32 @@ module Migrator @db.run "ALTER TABLE network_pool RENAME TO old_network_pool;" @db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));" - @db.fetch("SELECT * FROM old_network_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_network_pool") do |row| + doc = Document.new(row[:body]) - cluster_id = doc.root.get_text('CLUSTER_ID').to_s + cluster_id = doc.root.get_text('CLUSTER_ID').to_s - doc.root.add_element("GLOBAL_PREFIX") - doc.root.add_element("SITE_PREFIX") + doc.root.add_element("GLOBAL_PREFIX") + doc.root.add_element("SITE_PREFIX") - @db[:network_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => cluster_id) + @db[:network_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => cluster_id) + end end @db.run "DROP TABLE old_network_pool;" + log_time() + ######################################################################## # Feature #1617 # New datastore, 2 "files" @@ -181,40 +199,43 @@ module Migrator @db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;" @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_datastore_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_datastore_pool") do |row| + doc = Document.new(row[:body]) - type = "0" # IMAGE_DS + type = "0" # IMAGE_DS - system_elem = doc.root.delete_element("SYSTEM") + system_elem = doc.root.delete_element("SYSTEM") - if ( !system_elem.nil? && system_elem.text == "1" ) - type = "1" # SYSTEM_DS + if ( !system_elem.nil? && system_elem.text == "1" ) + type = "1" # SYSTEM_DS + end + + doc.root.add_element("TYPE").text = type + + doc.root.each_element("TEMPLATE") do |e| + e.delete_element("SYSTEM") + e.add_element("TYPE").text = type == "0" ? "IMAGE_DS" : "SYSTEM_DS" + end + + cluster_id = doc.root.get_text('CLUSTER_ID').to_s + + @db[:datastore_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => cluster_id) end - - doc.root.add_element("TYPE").text = type - - doc.root.each_element("TEMPLATE") do |e| - e.delete_element("SYSTEM") - e.add_element("TYPE").text = type == "0" ? 
"IMAGE_DS" : "SYSTEM_DS" - end - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s - - @db[:datastore_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => cluster_id) end @db.run "DROP TABLE old_datastore_pool;" + log_time() user_0_name = "oneadmin" @@ -241,6 +262,7 @@ module Migrator @db.run "INSERT INTO datastore_pool VALUES(2,'files','200#{user_0_name}#{group_0_name}files110100100fsssh#{base_path}20-1',0,0,1,1,1,-1);" + log_time() ######################################################################## # Feature #1611: Default quotas @@ -254,73 +276,79 @@ module Migrator @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - # oneadmin does not have quotas - @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end - @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) + @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) - set_default_quotas(doc) + set_default_quotas(doc) - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end end @db.run "DROP TABLE old_user_pool;" + log_time() @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + @db.transaction do + # oneadmin group does not have quotas + @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end + @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = 
Document.new(row[:body]) + set_default_quotas(doc) - set_default_quotas(doc) - - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end end @db.run "DROP TABLE old_group_pool;" + log_time() + ######################################################################## # Bug #1694: SYSTEM_DS is now set with the method adddatastore ######################################################################## @@ -328,84 +356,87 @@ module Migrator @db.run "ALTER TABLE cluster_pool RENAME TO old_cluster_pool;" @db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_cluster_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_cluster_pool") do |row| + doc = Document.new(row[:body]) - system_ds = 0 + system_ds = 0 - doc.root.each_element("TEMPLATE") do |e| - elem = e.delete_element("SYSTEM_DS") + doc.root.each_element("TEMPLATE") do |e| + elem = e.delete_element("SYSTEM_DS") - if !elem.nil? - system_ds = elem.text.to_i - end - end - - if system_ds != 0 - updated_body = nil - - @db.fetch("SELECT body FROM datastore_pool WHERE oid=#{system_ds}") do |ds_row| - ds_doc = Document.new(ds_row[:body]) - - type = "0" # IMAGE_DS - - ds_doc.root.each_element("TYPE") do |e| - type = e.text + if !elem.nil? + system_ds = elem.text.to_i end + end - if type != "1" - puts " > Cluster #{row[:oid]} has the "<< - "System Datastore set to Datastore #{system_ds}, "<< - "but its type is not SYSTEM_DS. The System Datastore "<< - "for this Cluster will be set to 0" + if system_ds != 0 + updated_body = nil - system_ds = 0 - else - cluster_id = "-1" + @db.fetch("SELECT body FROM datastore_pool WHERE oid=#{system_ds}") do |ds_row| + ds_doc = Document.new(ds_row[:body]) - ds_doc.root.each_element("CLUSTER_ID") do |e| - cluster_id = e.text + type = "0" # IMAGE_DS + + ds_doc.root.each_element("TYPE") do |e| + type = e.text end - if row[:oid] != cluster_id.to_i + if type != "1" puts " > Cluster #{row[:oid]} has the "<< "System Datastore set to Datastore #{system_ds}, "<< - "but it is not part of the Cluster. It will be added now." + "but its type is not SYSTEM_DS. The System Datastore "<< + "for this Cluster will be set to 0" + + system_ds = 0 + else + cluster_id = "-1" ds_doc.root.each_element("CLUSTER_ID") do |e| - e.text = row[:oid] + cluster_id = e.text end - ds_doc.root.each_element("CLUSTER") do |e| - e.text = row[:name] - end + if row[:oid] != cluster_id.to_i + puts " > Cluster #{row[:oid]} has the "<< + "System Datastore set to Datastore #{system_ds}, "<< + "but it is not part of the Cluster. It will be added now." - updated_body = ds_doc.root.to_s + ds_doc.root.each_element("CLUSTER_ID") do |e| + e.text = row[:oid] + end + + ds_doc.root.each_element("CLUSTER") do |e| + e.text = row[:name] + end + + updated_body = ds_doc.root.to_s + end end end + + if !updated_body.nil? + @db[:datastore_pool].where(:oid => system_ds).update( + :body => updated_body) + end end - if !updated_body.nil? 
- @db[:datastore_pool].where(:oid => system_ds).update( - :body => updated_body) - end + doc.root.add_element("SYSTEM_DS").text = system_ds.to_s + + @db[:cluster_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - doc.root.add_element("SYSTEM_DS").text = system_ds.to_s - - @db[:cluster_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_cluster_pool;" + log_time() ######################################################################## # Feature #1556: New elem USER_TEMPLATE @@ -418,41 +449,53 @@ module Migrator @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - @db.fetch("SELECT * FROM old_vm_pool") do |row| + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| - doc = Document.new(row[:body]) - user_template = doc.root.add_element("USER_TEMPLATE") + doc = Nokogiri::XML(row[:body]) + user_template = doc.create_element("USER_TEMPLATE") - doc.root.each_element("TEMPLATE") do |e| - elem = e.delete_element("REQUIREMENTS") + e = doc.root.at_xpath("TEMPLATE") + elem = e.at_xpath("REQUIREMENTS") if !elem.nil? - user_template.add_element("SCHED_REQUIREMENTS").text = elem.text + new_elem = doc.create_element("SCHED_REQUIREMENTS") + new_elem.content = elem.text + elem.remove + + user_template.add_child(new_elem) end - elem = e.delete_element("RANK") + elem = e.at_xpath("RANK") if !elem.nil? 
- user_template.add_element("SCHED_RANK").text = elem.text + new_elem = doc.create_element("SCHED_RANK") + new_elem.content = elem.text + elem.remove + + user_template.add_child(new_elem) end + + doc.root << user_template + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_vm_pool;" + log_time() ######################################################################## # Feature #1483: Move scheduling attributes @@ -463,43 +506,49 @@ module Migrator @db.run "ALTER TABLE template_pool RENAME TO old_template_pool;" @db.run "CREATE TABLE template_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - @db.fetch("SELECT * FROM old_template_pool") do |row| + @db.transaction do + @db.fetch("SELECT * FROM old_template_pool") do |row| - doc = Document.new(row[:body]) + doc = Nokogiri::XML(row[:body]) - template = nil + template = doc.root.at_xpath("TEMPLATE") - doc.root.each_element("TEMPLATE") do |e| - template = e - end - - doc.root.each_element("TEMPLATE") do |e| - elem = e.delete_element("REQUIREMENTS") + elem = template.at_xpath("REQUIREMENTS") if !elem.nil? - template.add_element("SCHED_REQUIREMENTS").text = elem.text + new_elem = doc.create_element("SCHED_REQUIREMENTS") + new_elem.content = elem.text + elem.remove + + template.add_child(new_elem) end - elem = e.delete_element("RANK") + elem = template.at_xpath("RANK") if !elem.nil? 
- template.add_element("SCHED_RANK").text = elem.text - end - end + new_elem = doc.create_element("SCHED_RANK") + new_elem.content = elem.text + elem.remove - @db[:template_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + template.add_child(new_elem) + end + + @db[:template_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end end @db.run "DROP TABLE old_template_pool;" + log_time() + ######################################################################## # Feature #1691 Add new attribute NIC/NIC_ID ######################################################################## @@ -507,38 +556,43 @@ module Migrator @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - @db.fetch("SELECT * FROM old_vm_pool") do |row| - if ( row[:state] != 6 ) # DONE - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + if ( row[:state] != 6 ) # DONE + doc = Nokogiri::XML(row[:body]) - nic_id = 0 + nic_id = 0 - doc.root.each_element("TEMPLATE/NIC") { |e| - e.delete_element("NIC_ID") - e.add_element("NIC_ID").text = (nic_id).to_s + doc.root.xpath("TEMPLATE/NIC").each { |e| + e.xpath("NIC_ID").each {|n| n.remove} + e.add_child(doc.create_element("NIC_ID")).content = + (nic_id).to_s - nic_id += 1 - } + nic_id += 1 + } - row[:body] = doc.root.to_s + row[:body] = doc.root.to_s + end + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_vm_pool;" + log_time() + ######################################################################## # # Banner for the new /var/lib/one/vms directory @@ -563,38 +617,38 @@ module Migrator # VM quotas - doc.root.each_element("VM_QUOTA/VM/CPU") do |e| - e.text = "-1" if e.text.to_f == 0 + doc.root.xpath("VM_QUOTA/VM/CPU").each do |e| + e.content = "-1" if e.text.to_f == 0 end - doc.root.each_element("VM_QUOTA/VM/MEMORY") do |e| - e.text = "-1" if e.text.to_i == 0 + doc.root.xpath("VM_QUOTA/VM/MEMORY").each do |e| + e.content = "-1" if e.text.to_i == 0 end - doc.root.each_element("VM_QUOTA/VM/VMS") do |e| - e.text = "-1" if e.text.to_i == 0 + doc.root.xpath("VM_QUOTA/VM/VMS").each do |e| + e.content = "-1" if e.text.to_i == 0 end # VNet quotas - doc.root.each_element("NETWORK_QUOTA/NETWORK/LEASES") do |e| - e.text = "-1" if e.text.to_i == 0 + doc.root.xpath("NETWORK_QUOTA/NETWORK/LEASES").each do |e| + e.content = "-1" if e.text.to_i == 0 end # Image quotas - 
doc.root.each_element("IMAGE_QUOTA/IMAGE/RVMS") do |e| - e.text = "-1" if e.text.to_i == 0 + doc.root.xpath("IMAGE_QUOTA/IMAGE/RVMS").each do |e| + e.content = "-1" if e.text.to_i == 0 end # Datastore quotas - doc.root.each_element("DATASTORE_QUOTA/DATASTORE/IMAGES") do |e| - e.text = "-1" if e.text.to_i == 0 + doc.root.xpath("DATASTORE_QUOTA/DATASTORE/IMAGES").each do |e| + e.content = "-1" if e.text.to_i == 0 end - doc.root.each_element("DATASTORE_QUOTA/DATASTORE/SIZE") do |e| - e.text = "-1" if e.text.to_i == 0 + doc.root.xpath("DATASTORE_QUOTA/DATASTORE/SIZE").each do |e| + e.content = "-1" if e.text.to_i == 0 end end end diff --git a/src/onedb/3.9.80_to_3.9.90.rb b/src/onedb/3.9.80_to_3.9.90.rb index 2c11a48328..e51f471b7a 100644 --- a/src/onedb/3.9.80_to_3.9.90.rb +++ b/src/onedb/3.9.80_to_3.9.90.rb @@ -14,8 +14,7 @@ # limitations under the License. # #--------------------------------------------------------------------------- # -require "rexml/document" -include REXML +require "nokogiri" class String def red @@ -39,6 +38,7 @@ module Migrator end def up + init_log_time() ######################################################################## # Feature #1631: Add ACTION to history entries @@ -47,49 +47,57 @@ module Migrator @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - @db.fetch("SELECT * FROM old_vm_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("HISTORY_RECORDS/HISTORY") do |e| - update_history(e) + doc.root.xpath("HISTORY_RECORDS/HISTORY").each do |e| + update_history(e) + end + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_vm_pool;" + log_time() + @db.run "ALTER TABLE history RENAME TO old_history;" @db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));" - @db.fetch("SELECT * FROM old_history") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_history") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("/HISTORY") do |e| - update_history(e) + doc.root.xpath("/HISTORY").each do |e| + update_history(e) + end + + @db[:history].insert( + :vid => row[:vid], + :seq => row[:seq], + :body => doc.root.to_s, + :stime => row[:stime], + :etime => row[:etime]) end - - @db[:history].insert( - :vid => row[:vid], - :seq => row[:seq], - :body => doc.root.to_s, - :stime => row[:stime], - :etime => row[:etime]) end @db.run "DROP TABLE old_history;" + log_time() + ######################################################################## # Banner for drivers renamed 
######################################################################## @@ -135,16 +143,18 @@ module Migrator end def update_history(history_elem) - history_elem.add_element("ACTION").text = "0" # NONE_ACTION + # NONE_ACTION + history_elem.add_child( + history_elem.document.create_element("ACTION")).content = "0" # History reason enum has changed from # NONE, ERROR, STOP_RESUME, USER, CANCEL to # NONE, ERROR, USER - history_elem.each_element("REASON") do |reason_e| + history_elem.xpath("REASON").each do |reason_e| reason = reason_e.text.to_i - if reason > 1 # STOP_RESUME, USER, CANCEL - reason_e.text = "2" # USER + if reason > 1 # STOP_RESUME, USER, CANCEL + reason_e.content = "2" # USER end end end diff --git a/src/onedb/4.0.1_to_4.1.80.rb b/src/onedb/4.0.1_to_4.1.80.rb index f44fe554cf..965d93ef92 100644 --- a/src/onedb/4.0.1_to_4.1.80.rb +++ b/src/onedb/4.0.1_to_4.1.80.rb @@ -15,9 +15,9 @@ #--------------------------------------------------------------------------- # require 'fileutils' -require 'rexml/document' require 'openssl' +require "nokogiri" module Migrator def db_version @@ -42,30 +42,35 @@ module Migrator puts "Please copy the files manually." end + init_log_time() + @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_user_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_user_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("TEMPLATE") do |e| - e.add_element("TOKEN_PASSWORD").text = - OpenSSL::Digest::SHA1.hexdigest( rand().to_s ) + doc.root.at_xpath("TEMPLATE") + .add_child(doc.create_element("TOKEN_PASSWORD")) + .content = OpenSSL::Digest::SHA1.hexdigest( rand().to_s ) + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_user_pool;" + log_time() + ######################################################################## # Feature #1613 ######################################################################## @@ -73,27 +78,31 @@ module Migrator @db.run "ALTER TABLE datastore_pool RENAME TO old_datastore_pool;" @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_datastore_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_datastore_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.add_element("TOTAL_MB").text = "0" - doc.root.add_element("FREE_MB").text = "0" - doc.root.add_element("USED_MB").text = "0" + doc.root.add_child(doc.create_element("TOTAL_MB")).content = "0" + doc.root.add_child(doc.create_element("FREE_MB")).content = "0" + doc.root.add_child(doc.create_element("USED_MB")).content = "0" - @db[:datastore_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid 
=> row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => row[:cid]) + @db[:datastore_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => row[:cid]) + end end @db.run "DROP TABLE old_datastore_pool;" + log_time() + return true end end diff --git a/src/onedb/4.2.0_to_4.3.80.rb b/src/onedb/4.2.0_to_4.3.80.rb index 96ffc65e27..be408b512c 100644 --- a/src/onedb/4.2.0_to_4.3.80.rb +++ b/src/onedb/4.2.0_to_4.3.80.rb @@ -15,6 +15,7 @@ #--------------------------------------------------------------------------- # require 'rexml/document' +require 'nokogiri' TM_MAD_CONF = { "dummy" => { @@ -78,6 +79,8 @@ module Migrator def up + init_log_time() + ######################################################################## # Feature #1742 & #1612 ######################################################################## @@ -85,29 +88,36 @@ module Migrator @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_user_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_user_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.add_element("GROUPS").add_element("ID").text = row[:gid].to_s + g_elem = doc.create_element("GROUPS") + g_elem.add_child(doc.create_element("ID")).content = row[:gid].to_s - # oneadmin does not have quotas - if row[:oid] != 0 - redo_vm_quotas(doc, "uid=#{row[:oid]}") + doc.root.add_child(g_elem) + + # oneadmin does not have quotas + if row[:oid] != 0 + redo_vm_quotas(doc, "uid=#{row[:oid]}") + end + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_user_pool;" + log_time() + ######################################################################## # Feature #1612 ######################################################################## @@ -115,37 +125,41 @@ module Migrator @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => row[:body], - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) - end + @db.transaction do + # oneadmin group does not have quotas + @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => row[:body], + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => 
row[:group_u], + :other_u => row[:other_u]) + end - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = REXML::Document.new(row[:body]) + @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) - redo_vm_quotas(doc, "gid=#{row[:oid]}") + redo_vm_quotas(doc, "gid=#{row[:oid]}") - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end end @db.run "DROP TABLE old_group_pool;" + log_time() + ######################################################################## # Bug #2330 & Feature #1678 ######################################################################## @@ -154,61 +168,64 @@ module Migrator @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" #tm_mads = {} + @db.transaction do + @db.fetch("SELECT * FROM old_datastore_pool") do |row| + doc = REXML::Document.new(row[:body]) - @db.fetch("SELECT * FROM old_datastore_pool") do |row| - doc = REXML::Document.new(row[:body]) - - doc.root.each_element("TEMPLATE/HOST") do |e| - e.name = "BRIDGE_LIST" - end - - tm_mad = "" - doc.root.each_element("TM_MAD"){ |e| tm_mad = e.text } - - type = 0 - doc.root.each_element("TYPE"){ |e| type = e.text.to_i } - - if (type == 1) # System DS - doc.root.each_element("TEMPLATE") do |e| - e.add_element("SHARED").text = - (tm_mad == "ssh" ? "NO" : "YES") + doc.root.each_element("TEMPLATE/HOST") do |e| + e.name = "BRIDGE_LIST" end - else - #tm_mads[row[:oid].to_i] = tm_mad - conf = TM_MAD_CONF[tm_mad] + tm_mad = "" + doc.root.each_element("TM_MAD"){ |e| tm_mad = e.text } - if conf.nil? - puts - puts "ATTENTION: manual intervention required".red - puts <<-END + type = 0 + doc.root.each_element("TYPE"){ |e| type = e.text.to_i } + + if (type == 1) # System DS + doc.root.each_element("TEMPLATE") do |e| + e.add_element("SHARED").text = + (tm_mad == "ssh" ? "NO" : "YES") + end + else + #tm_mads[row[:oid].to_i] = tm_mad + + conf = TM_MAD_CONF[tm_mad] + + if conf.nil? + puts + puts "ATTENTION: manual intervention required".red + puts <<-END The Datastore ##{row[:oid]} #{row[:name]} is using the custom TM MAD '#{tm_mad}'. 
You will need to define new configuration parameters in oned.conf for this driver, see http://opennebula.org/documentation:rel4.4:upgrade - END - else - doc.root.each_element("TEMPLATE") do |e| - e.add_element("LN_TARGET").text = conf[:ln_target] - e.add_element("CLONE_TARGET").text = conf[:clone_target] + END + else + doc.root.each_element("TEMPLATE") do |e| + e.add_element("LN_TARGET").text = conf[:ln_target] + e.add_element("CLONE_TARGET").text = conf[:clone_target] + end end end - end - @db[:datastore_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => row[:cid]) + @db[:datastore_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => row[:cid]) + end end @db.run "DROP TABLE old_datastore_pool;" + log_time() + ######################################################################## # Feature #2392 ######################################################################## @@ -216,49 +233,57 @@ http://opennebula.org/documentation:rel4.4:upgrade @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" @db.run "CREATE TABLE vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER);" - @db.fetch("SELECT * FROM old_vm_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_vm_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("HISTORY_RECORDS/HISTORY") do |e| - update_history(e) + doc.root.xpath("HISTORY_RECORDS/HISTORY").each do |e| + update_history(e) + end + + @db[:vm_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :last_poll => row[:last_poll], + :state => row[:state], + :lcm_state => row[:lcm_state], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) end - - @db[:vm_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:uid], - :gid => row[:gid], - :last_poll => row[:last_poll], - :state => row[:state], - :lcm_state => row[:lcm_state], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) end @db.run "DROP TABLE old_vm_pool;" + log_time() + @db.run "ALTER TABLE history RENAME TO old_history;" @db.run "CREATE TABLE history (vid INTEGER, seq INTEGER, body MEDIUMTEXT, stime INTEGER, etime INTEGER,PRIMARY KEY(vid,seq));" - @db.fetch("SELECT * FROM old_history") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_history") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("/HISTORY") do |e| - update_history(e) + doc.root.xpath("/HISTORY").each do |e| + update_history(e) + end + + @db[:history].insert( + :vid => row[:vid], + :seq => row[:seq], + :body => doc.root.to_s, + :stime => row[:stime], + :etime => row[:etime]) end - - @db[:history].insert( - :vid => row[:vid], - :seq => row[:seq], - :body => doc.root.to_s, - :stime => row[:stime], - :etime => row[:etime]) end @db.run "DROP TABLE old_history;" + log_time() + ######################################################################## # Feature 
#1678 ######################################################################## @@ -266,29 +291,32 @@ http://opennebula.org/documentation:rel4.4:upgrade @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * FROM old_host_pool") do |row| - doc = REXML::Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * FROM old_host_pool") do |row| + doc = Nokogiri::XML(row[:body]) - doc.root.each_element("HOST_SHARE") do |e| - e.add_element("DATASTORES") + doc.root.at_xpath("HOST_SHARE") + .add_child(doc.create_element("DATASTORES")) + + @db[:host_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :state => row[:state], + :last_mon_time => row[:last_mon_time], + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u], + :cid => row[:cid]) end - - @db[:host_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :state => row[:state], - :last_mon_time => row[:last_mon_time], - :uid => row[:uid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u], - :cid => row[:cid]) end @db.run "DROP TABLE old_host_pool;" + log_time() + # TODO: # For Feature #1678, VMs have new disk elements: # VM/DISK/CLONE_TARGET @@ -309,20 +337,23 @@ http://opennebula.org/documentation:rel4.4:upgrade vms_limit = "-1" vol_limit = "-1" - doc.root.each_element("VM_QUOTA/VM/CPU") { |e| + doc.root.xpath("VM_QUOTA/VM/CPU").each { |e| cpu_limit = e.text } - doc.root.each_element("VM_QUOTA/VM/MEMORY") { |e| + doc.root.xpath("VM_QUOTA/VM/MEMORY").each { |e| mem_limit = e.text } - doc.root.each_element("VM_QUOTA/VM/VMS") { |e| + doc.root.xpath("VM_QUOTA/VM/VMS").each { |e| vms_limit = e.text } - doc.root.delete_element("VM_QUOTA") - vm_quota = doc.root.add_element("VM_QUOTA") + doc.root.xpath("VM_QUOTA").each { |e| + e.remove + } + + vm_quota = doc.root.add_child(doc.create_element("VM_QUOTA")) # VM quotas cpu_used = 0 @@ -331,26 +362,26 @@ http://opennebula.org/documentation:rel4.4:upgrade vol_used = 0 @db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row| - vmdoc = REXML::Document.new(vm_row[:body]) + vmdoc = Nokogiri::XML(vm_row[:body]) # VM quotas - vmdoc.root.each_element("TEMPLATE/CPU") { |e| + vmdoc.root.xpath("TEMPLATE/CPU").each { |e| cpu_used += e.text.to_f } - vmdoc.root.each_element("TEMPLATE/MEMORY") { |e| + vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e| mem_used += e.text.to_i } - vmdoc.root.each_element("TEMPLATE/DISK") { |e| + vmdoc.root.xpath("TEMPLATE/DISK").each { |e| type = "" - e.each_element("TYPE") { |t_elem| + e.xpath("TYPE").each { |t_elem| type = t_elem.text.upcase } if ( type == "SWAP" || type == "FS") - e.each_element("SIZE") { |size_elem| + e.xpath("SIZE").each { |size_elem| vol_used += size_elem.text.to_i } end @@ -363,38 +394,40 @@ http://opennebula.org/documentation:rel4.4:upgrade cpu_limit != "-1" || mem_limit != "-1" || vms_limit != "-1" || vol_limit != "-1" ) # VM quotas - vm_elem = vm_quota.add_element("VM") + vm_elem = vm_quota.add_child(doc.create_element("VM")) - vm_elem.add_element("CPU").text = cpu_limit - vm_elem.add_element("CPU_USED").text = sprintf('%.2f', cpu_used) + 
vm_elem.add_child(doc.create_element("CPU")).content = cpu_limit + vm_elem.add_child(doc.create_element("CPU_USED")).content = sprintf('%.2f', cpu_used) - vm_elem.add_element("MEMORY").text = mem_limit - vm_elem.add_element("MEMORY_USED").text = mem_used.to_s + vm_elem.add_child(doc.create_element("MEMORY")).content = mem_limit + vm_elem.add_child(doc.create_element("MEMORY_USED")).content = mem_used.to_s - vm_elem.add_element("VMS").text = vms_limit - vm_elem.add_element("VMS_USED").text = vms_used.to_s + vm_elem.add_child(doc.create_element("VMS")).content = vms_limit + vm_elem.add_child(doc.create_element("VMS_USED")).content = vms_used.to_s - vm_elem.add_element("VOLATILE_SIZE").text = vol_limit - vm_elem.add_element("VOLATILE_SIZE_USED").text = vol_used.to_s + vm_elem.add_child(doc.create_element("VOLATILE_SIZE")).content = vol_limit + vm_elem.add_child(doc.create_element("VOLATILE_SIZE_USED")).content = vol_used.to_s end end def update_history(history_elem) hid = nil - history_elem.each_element("HID") do |e| + history_elem.xpath("HID").each do |e| hid = e.text end - new_elem = history_elem.add_element("CID") - new_elem.text = "-1" # Cluster None + new_elem = history_elem.add_child( + history_elem.document.create_element("CID")) + + new_elem.content = "-1" # Cluster None if hid.nil? return end @db.fetch("SELECT cid FROM host_pool WHERE oid = #{hid}") do |row| - new_elem.text = row[:cid].to_s + new_elem.content = row[:cid].to_s end end diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index faf6dd3f4b..1b59e7ecf9 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -16,6 +16,8 @@ require 'onedb_backend' +LOG_TIME = false + class OneDB def initialize(ops) if ops[:backend] == :sqlite @@ -133,6 +135,8 @@ class OneDB result = nil i = 0 + timea = Time.now + while ( matches.size > 0 ) if ( matches.size > 1 ) raise "There are more than one file that match \ @@ -143,10 +147,18 @@ class OneDB puts " > Running migrator #{file}" if ops[:verbose] + time0 = Time.now + load(file) @backend.extend Migrator result = @backend.up + time1 = Time.now + + if LOG_TIME + puts " > Time for #{file}: #{time1 - time0}s" + end + if !result raise "Error while upgrading from #{version} to " << " #{@backend.db_version}" @@ -166,6 +178,12 @@ class OneDB puts "Database already uses version #{version}" end + timeb = Time.now + + if LOG_TIME + puts " > Total time: #{timeb - timea}s" if ops[:verbose] + end + return 0 rescue Exception => e diff --git a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index 3832bc20ab..ba9fdf5629 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -129,6 +129,20 @@ class OneDBBacKEnd return found end + + def init_log_time() + @block_n = 0 + @time0 = Time.now + end + + def log_time() + if LOG_TIME + @time1 = Time.now + puts " > #{db_version} Time for block #{@block_n}: #{@time1 - @time0}s" + @time0 = Time.now + @block_n += 1 + end + end end class BackEndMySQL < OneDBBacKEnd From 8067c0db356d1bdd8edf431bc78bece3ea7d7848 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Mon, 17 Feb 2014 17:53:03 +0100 Subject: [PATCH 23/80] Feature #2586: Optimize onedb fsck (cherry picked from commit 492310768a15f90eb4428fcc43d3f190d53bd91f) --- src/onedb/fsck.rb | 1263 +++++++++++++++++++++++--------------------- src/onedb/onedb.rb | 8 + 2 files changed, 672 insertions(+), 599 deletions(-) diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index aeb8964b15..5b9e0a221d 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -19,6 +19,8 @@ include 
REXML require 'ipaddr' require 'set' +require 'nokogiri' + module OneDBFsck VERSION = "4.5.0" @@ -102,6 +104,7 @@ module OneDBFsck # VNET/GNAME ######################################################################## + init_log_time() @errors = 0 puts @@ -151,6 +154,7 @@ module OneDBFsck end end + log_time() ######################################################################## # Groups @@ -239,48 +243,54 @@ module OneDBFsck end end - users_fix.each do |id, user| - @db[:user_pool].where(:oid => id).update( - :body => user[:body], - :gid => user[:gid]) + @db.transaction do + users_fix.each do |id, user| + @db[:user_pool].where(:oid => id).update( + :body => user[:body], + :gid => user[:gid]) + end end + log_time() @db.run "CREATE TABLE group_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * from group_pool") do |row| - gid = row[:oid] - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * from group_pool") do |row| + gid = row[:oid] + doc = Document.new(row[:body]) - users_elem = doc.root.elements.delete("USERS") + users_elem = doc.root.elements.delete("USERS") - users_new_elem = doc.root.add_element("USERS") + users_new_elem = doc.root.add_element("USERS") - group[gid].each do |id| - id_elem = users_elem.elements.delete("ID[.=#{id}]") + group[gid].each do |id| + id_elem = users_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("User #{id} is missing from Group #{gid} users id list") + if id_elem.nil? + log_error("User #{id} is missing from Group #{gid} users id list") + end + + users_new_elem.add_element("ID").text = id.to_s end - users_new_elem.add_element("ID").text = id.to_s + users_elem.each_element("ID") do |id_elem| + log_error("User #{id_elem.text} is in Group #{gid} users id list, but it should not") + end + + row[:body] = doc.to_s + + # commit + @db[:group_pool_new].insert(row) end - - users_elem.each_element("ID") do |id_elem| - log_error("User #{id_elem.text} is in Group #{gid} users id list, but it should not") - end - - row[:body] = doc.to_s - - # commit - @db[:group_pool_new].insert(row) end # Rename table @db.run("DROP TABLE group_pool") @db.run("ALTER TABLE group_pool_new RENAME TO group_pool") + log_time() ######################################################################## # Clusters @@ -324,250 +334,259 @@ module OneDBFsck datastores_fix = {} vnets_fix = {} - @db.fetch("SELECT oid,body,cid FROM host_pool") do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT oid,body,cid FROM host_pool") do |row| + doc = Document.new(row[:body]) - cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i - cluster_name = doc.root.get_text('CLUSTER') + cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i + cluster_name = doc.root.get_text('CLUSTER') - if cluster_id != row[:cid] - log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") - hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} - end + if cluster_id != row[:cid] + log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") + hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + end - if cluster_id != -1 - cluster_entry = cluster[cluster_id] + if cluster_id != -1 + cluster_entry = cluster[cluster_id] - if cluster_entry.nil? 
- log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + if cluster_entry.nil? + log_error("Host #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end - - doc.root.each_element('CLUSTER') do |e| - e.text = "" - end - - hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - else - if cluster_name != cluster_entry[:name] - log_error("Host #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}") - - doc.root.each_element('CLUSTER') do |e| - e.text = cluster_entry[:name] + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" end - hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} - end + doc.root.each_element('CLUSTER') do |e| + e.text = "" + end - cluster_entry[:hosts] << row[:oid] - end - end - end - - hosts_fix.each do |id, entry| - @db[:host_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) - end - - - @db.fetch("SELECT oid,body,cid FROM datastore_pool") do |row| - doc = Document.new(row[:body]) - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i - cluster_name = doc.root.get_text('CLUSTER') - - if cluster_id != row[:cid] - log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") - hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} - end - - if cluster_id != -1 - cluster_entry = cluster[cluster_id] - - if cluster_entry.nil? - log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") - - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end - - doc.root.each_element('CLUSTER') do |e| - e.text = "" - end - - datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - else - if doc.root.get_text('TYPE').to_s != "1" - cluster_entry[:datastores] << row[:oid] + hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} else - if cluster_entry[:system_ds] == 0 - cluster_entry[:datastores] << row[:oid] - cluster_entry[:system_ds] = row[:oid] - else - log_error("System Datastore #{row[:oid]} is in Cluster #{cluster_id}, but it already contains System Datastore #{cluster_entry[:system_ds]}") - - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end + if cluster_name != cluster_entry[:name] + log_error("Host #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}") doc.root.each_element('CLUSTER') do |e| - e.text = "" + e.text = cluster_entry[:name] end - datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - - next - end - end - - if cluster_name != cluster_entry[:name] - log_error("Datastore #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. 
It will be changed to #{cluster_entry[:name]}") - - doc.root.each_element('CLUSTER') do |e| - e.text = cluster_entry[:name] + hosts_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} end - datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} + cluster_entry[:hosts] << row[:oid] end end end - end - datastores_fix.each do |id, entry| - @db[:datastore_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) - end - - - @db.fetch("SELECT oid,body,cid FROM network_pool") do |row| - doc = Document.new(row[:body]) - - cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i - cluster_name = doc.root.get_text('CLUSTER') - - if cluster_id != row[:cid] - log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") - hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + hosts_fix.each do |id, entry| + @db[:host_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) end - if cluster_id != -1 - cluster_entry = cluster[cluster_id] + log_time() - if cluster_entry.nil? - log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + @db.fetch("SELECT oid,body,cid FROM datastore_pool") do |row| + doc = Document.new(row[:body]) - doc.root.each_element('CLUSTER_ID') do |e| - e.text = "-1" - end + cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i + cluster_name = doc.root.get_text('CLUSTER') - doc.root.each_element('CLUSTER') do |e| - e.text = "" - end + if cluster_id != row[:cid] + log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") + hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + end - vnets_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - else - if cluster_name != cluster_entry[:name] - log_error("VNet #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}") + if cluster_id != -1 + cluster_entry = cluster[cluster_id] + + if cluster_entry.nil? + log_error("Datastore #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" + end doc.root.each_element('CLUSTER') do |e| - e.text = cluster_entry[:name] + e.text = "" + end + + datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} + else + if doc.root.get_text('TYPE').to_s != "1" + cluster_entry[:datastores] << row[:oid] + else + if cluster_entry[:system_ds] == 0 + cluster_entry[:datastores] << row[:oid] + cluster_entry[:system_ds] = row[:oid] + else + log_error("System Datastore #{row[:oid]} is in Cluster #{cluster_id}, but it already contains System Datastore #{cluster_entry[:system_ds]}") + + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" + end + + doc.root.each_element('CLUSTER') do |e| + e.text = "" + end + + datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} + + next + end + end + + if cluster_name != cluster_entry[:name] + log_error("Datastore #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. 
It will be changed to #{cluster_entry[:name]}") + + doc.root.each_element('CLUSTER') do |e| + e.text = cluster_entry[:name] + end + + datastores_fix[row[:oid]] = {:body => doc.to_s, :cid => cluster_id} + end + end + end + end + + datastores_fix.each do |id, entry| + @db[:datastore_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) + end + + log_time() + + @db.fetch("SELECT oid,body,cid FROM network_pool") do |row| + doc = Document.new(row[:body]) + + cluster_id = doc.root.get_text('CLUSTER_ID').to_s.to_i + cluster_name = doc.root.get_text('CLUSTER') + + if cluster_id != row[:cid] + log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but cid column has cluster #{row[:cid]}") + hosts_fix[row[:oid]] = {:body => row[:body], :cid => cluster_id} + end + + if cluster_id != -1 + cluster_entry = cluster[cluster_id] + + if cluster_entry.nil? + log_error("VNet #{row[:oid]} is in cluster #{cluster_id}, but it does not exist") + + doc.root.each_element('CLUSTER_ID') do |e| + e.text = "-1" + end + + doc.root.each_element('CLUSTER') do |e| + e.text = "" end vnets_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} - end + else + if cluster_name != cluster_entry[:name] + log_error("VNet #{row[:oid]} has a wrong name for cluster #{cluster_id}, #{cluster_name}. It will be changed to #{cluster_entry[:name]}") - cluster_entry[:vnets] << row[:oid] + doc.root.each_element('CLUSTER') do |e| + e.text = cluster_entry[:name] + end + + vnets_fix[row[:oid]] = {:body => doc.to_s, :cid => -1} + end + + cluster_entry[:vnets] << row[:oid] + end end end + + vnets_fix.each do |id, entry| + @db[:network_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) + end end - vnets_fix.each do |id, entry| - @db[:network_pool].where(:oid => id).update(:body => entry[:body], :cid => entry[:cid]) - end - + log_time() @db.run "CREATE TABLE cluster_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.fetch("SELECT * from cluster_pool") do |row| - cluster_id = row[:oid] - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * from cluster_pool") do |row| + cluster_id = row[:oid] + doc = Document.new(row[:body]) - # Hosts - hosts_elem = doc.root.elements.delete("HOSTS") + # Hosts + hosts_elem = doc.root.elements.delete("HOSTS") - hosts_new_elem = doc.root.add_element("HOSTS") + hosts_new_elem = doc.root.add_element("HOSTS") - cluster[cluster_id][:hosts].each do |id| - id_elem = hosts_elem.elements.delete("ID[.=#{id}]") + cluster[cluster_id][:hosts].each do |id| + id_elem = hosts_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("Host #{id} is missing from Cluster #{cluster_id} host id list") + if id_elem.nil? 
+ log_error("Host #{id} is missing from Cluster #{cluster_id} host id list") + end + + hosts_new_elem.add_element("ID").text = id.to_s end - hosts_new_elem.add_element("ID").text = id.to_s - end - - hosts_elem.each_element("ID") do |id_elem| - log_error("Host #{id_elem.text} is in Cluster #{cluster_id} host id list, but it should not") - end - - - # Datastores - ds_elem = doc.root.elements.delete("DATASTORES") - - ds_new_elem = doc.root.add_element("DATASTORES") - - doc.root.each_element("SYSTEM_DS") do |e| - system_ds = e.text.to_i - - if system_ds != cluster[cluster_id][:system_ds] - log_error("Cluster #{cluster_id} has System Datastore set to #{system_ds}, but it should be #{cluster[cluster_id][:system_ds]}") - - e.text = cluster[cluster_id][:system_ds].to_s - end - end - - cluster[cluster_id][:datastores].each do |id| - id_elem = ds_elem.elements.delete("ID[.=#{id}]") - - if id_elem.nil? - log_error("Datastore #{id} is missing from Cluster #{cluster_id} datastore id list") + hosts_elem.each_element("ID") do |id_elem| + log_error("Host #{id_elem.text} is in Cluster #{cluster_id} host id list, but it should not") end - ds_new_elem.add_element("ID").text = id.to_s - end - ds_elem.each_element("ID") do |id_elem| - log_error("Datastore #{id_elem.text} is in Cluster #{cluster_id} datastore id list, but it should not") - end + # Datastores + ds_elem = doc.root.elements.delete("DATASTORES") + ds_new_elem = doc.root.add_element("DATASTORES") - # VNets - vnets_elem = doc.root.elements.delete("VNETS") + doc.root.each_element("SYSTEM_DS") do |e| + system_ds = e.text.to_i - vnets_new_elem = doc.root.add_element("VNETS") + if system_ds != cluster[cluster_id][:system_ds] + log_error("Cluster #{cluster_id} has System Datastore set to #{system_ds}, but it should be #{cluster[cluster_id][:system_ds]}") - cluster[cluster_id][:vnets].each do |id| - id_elem = vnets_elem.elements.delete("ID[.=#{id}]") - - if id_elem.nil? - log_error("VNet #{id} is missing from Cluster #{cluster_id} vnet id list") + e.text = cluster[cluster_id][:system_ds].to_s + end end - vnets_new_elem.add_element("ID").text = id.to_s + cluster[cluster_id][:datastores].each do |id| + id_elem = ds_elem.elements.delete("ID[.=#{id}]") + + if id_elem.nil? + log_error("Datastore #{id} is missing from Cluster #{cluster_id} datastore id list") + end + + ds_new_elem.add_element("ID").text = id.to_s + end + + ds_elem.each_element("ID") do |id_elem| + log_error("Datastore #{id_elem.text} is in Cluster #{cluster_id} datastore id list, but it should not") + end + + + # VNets + vnets_elem = doc.root.elements.delete("VNETS") + + vnets_new_elem = doc.root.add_element("VNETS") + + cluster[cluster_id][:vnets].each do |id| + id_elem = vnets_elem.elements.delete("ID[.=#{id}]") + + if id_elem.nil? 
+ log_error("VNet #{id} is missing from Cluster #{cluster_id} vnet id list") + end + + vnets_new_elem.add_element("ID").text = id.to_s + end + + vnets_elem.each_element("ID") do |id_elem| + log_error("VNet #{id_elem.text} is in Cluster #{cluster_id} vnet id list, but it should not") + end + + + row[:body] = doc.to_s + + # commit + @db[:cluster_pool_new].insert(row) end - - vnets_elem.each_element("ID") do |id_elem| - log_error("VNet #{id_elem.text} is in Cluster #{cluster_id} vnet id list, but it should not") - end - - - row[:body] = doc.to_s - - # commit - @db[:cluster_pool_new].insert(row) end + log_time() + # Rename table @db.run("DROP TABLE cluster_pool") @db.run("ALTER TABLE cluster_pool_new RENAME TO cluster_pool") @@ -633,46 +652,57 @@ module OneDBFsck end end - images_fix.each do |id, body| - @db[:image_pool].where(:oid => id).update(:body => body) + @db.transaction do + images_fix.each do |id, body| + @db[:image_pool].where(:oid => id).update(:body => body) + end end + log_time() @db.run "CREATE TABLE datastore_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name));" - @db.fetch("SELECT * from datastore_pool") do |row| - ds_id = row[:oid] - doc = Document.new(row[:body]) + @db.transaction do + @db.fetch("SELECT * from datastore_pool") do |row| + ds_id = row[:oid] + doc = Document.new(row[:body]) - images_elem = doc.root.elements.delete("IMAGES") + images_elem = doc.root.elements.delete("IMAGES") - images_new_elem = doc.root.add_element("IMAGES") + images_new_elem = doc.root.add_element("IMAGES") - datastore[ds_id][:images].each do |id| - id_elem = images_elem.elements.delete("ID[.=#{id}]") + datastore[ds_id][:images].each do |id| + id_elem = images_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("Image #{id} is missing from Datastore #{ds_id} image id list") + if id_elem.nil? + log_error( + "Image #{id} is missing from Datastore #{ds_id} "<< + "image id list") + end + + images_new_elem.add_element("ID").text = id.to_s end - images_new_elem.add_element("ID").text = id.to_s + images_elem.each_element("ID") do |id_elem| + log_error( + "Image #{id_elem.text} is in Cluster #{ds_id} "<< + "image id list, but it should not") + end + + + row[:body] = doc.to_s + + # commit + @db[:datastore_pool_new].insert(row) end - - images_elem.each_element("ID") do |id_elem| - log_error("Image #{id_elem.text} is in Cluster #{ds_id} image id list, but it should not") - end - - - row[:body] = doc.to_s - - # commit - @db[:datastore_pool_new].insert(row) end # Rename table @db.run("DROP TABLE datastore_pool") @db.run("ALTER TABLE datastore_pool_new RENAME TO datastore_pool") + log_time() + ######################################################################## # VM Counters for host, image and vnet usage ######################################################################## @@ -692,6 +722,8 @@ module OneDBFsck } end + log_time() + # Init image counters @db.fetch("SELECT oid,body FROM image_pool") do |row| if counters[:image][row[:oid]].nil? 
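
Note (illustrative sketch, not part of the patch): each block touched by this fsck/migrator rewrite follows the same recurring shape: time the block with init_log_time()/log_time(), parse the row body with Nokogiri instead of REXML, and group the per-row INSERTs into one Sequel transaction so the whole table copy is committed once, which is presumably where most of the "Optimize onedb fsck" speedup comes from. The table name and XPath below are illustrative, not taken verbatim from the patch:

    init_log_time()                         # reset the per-block timer

    @db.transaction do                      # single commit for the whole copy
      @db.fetch("SELECT * FROM old_image_pool") do |row|
        doc = Nokogiri::XML(row[:body])     # Nokogiri in place of REXML

        # example rewrite, mirroring the patch's nil-safe xpath idiom
        doc.root.xpath("RUNNING_VMS").each { |e| e.content = "0" }

        row[:body] = doc.root.to_s
        @db[:image_pool].insert(row)        # buffered inside the transaction
      end
    end

    log_time()                              # prints only when LOG_TIME is true
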
@@ -717,6 +749,8 @@ module OneDBFsck end end + log_time() + # Init vnet counters @db.fetch("SELECT oid,body FROM network_pool") do |row| doc = Document.new(row[:body]) @@ -728,43 +762,46 @@ module OneDBFsck } end + log_time() + vms_fix = {} # Aggregate information of the RUNNING vms @db.fetch("SELECT oid,body FROM vm_pool WHERE state<>6") do |row| - vm_doc = Document.new(row[:body]) - - state = vm_doc.root.get_text('STATE').to_s.to_i - lcm_state = vm_doc.root.get_text('LCM_STATE').to_s.to_i + vm_doc = Nokogiri::XML(row[:body]) + state = vm_doc.root.at_xpath('STATE').text.to_i + lcm_state = vm_doc.root.at_xpath('LCM_STATE').text.to_i # Images used by this VM - vm_doc.root.each_element("TEMPLATE/DISK/IMAGE_ID") do |e| + vm_doc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each do |e| img_id = e.text.to_i if counters[:image][img_id].nil? - log_error("VM #{row[:oid]} is using Image #{img_id}, but it does not exist") + log_error("VM #{row[:oid]} is using Image #{img_id}, but "<< + "it does not exist") else counters[:image][img_id][:vms].add(row[:oid]) end end # VNets used by this VM - vm_doc.root.each_element("TEMPLATE/NIC") do |e| + vm_doc.root.xpath("TEMPLATE/NIC").each do |e| net_id = nil - e.each_element("NETWORK_ID") do |nid| + e.xpath("NETWORK_ID").each do |nid| net_id = nid.text.to_i end if !net_id.nil? if counters[:vnet][net_id].nil? - log_error("VM #{row[:oid]} is using VNet #{net_id}, but it does not exist") + log_error("VM #{row[:oid]} is using VNet #{net_id}, "<< + "but it does not exist") else - counters[:vnet][net_id][:leases][e.get_text('IP').to_s] = + counters[:vnet][net_id][:leases][e.at_xpath('IP').text] = [ - e.get_text('MAC').to_s, # MAC + e.at_xpath('MAC').text, # MAC "1", # USED - vm_doc.root.get_text('ID').to_s.to_i # VID + vm_doc.root.at_xpath('ID').text.to_i # VID ] end end @@ -777,41 +814,39 @@ module OneDBFsck next if !([3,5,8].include? state) # Get memory (integer) - memory = 0 - vm_doc.root.each_element("TEMPLATE/MEMORY") { |e| - memory = e.text.to_i - } + memory = vm_doc.root.at_xpath("TEMPLATE/MEMORY").text.to_i # Get CPU (float) - cpu = 0 - vm_doc.root.each_element("TEMPLATE/CPU") { |e| - cpu = e.text.to_f - } + cpu = vm_doc.root.at_xpath("TEMPLATE/CPU").text.to_f # Get hostid, hostname hid = -1 - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HID") { |e| + vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HID").each { |e| hid = e.text.to_i } hostname = "" - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME") { |e| + vm_doc.root.xpath("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME").each { |e| hostname = e.text } counters_host = counters[:host][hid] if counters_host.nil? - log_error("VM #{row[:oid]} is using Host #{hid}, but it does not exist") + log_error("VM #{row[:oid]} is using Host #{hid}, "<< + "but it does not exist") else if counters_host[:name] != hostname - log_error("VM #{row[:oid]} has a wrong hostname for Host #{hid}, #{hostname}. It will be changed to #{counters_host[:name]}") + log_error("VM #{row[:oid]} has a wrong hostname for "<< + "Host #{hid}, #{hostname}. 
It will be changed to "<< + "#{counters_host[:name]}") - vm_doc.root.each_element("HISTORY_RECORDS/HISTORY[last()]/HOSTNAME") { |e| - e.text = counters_host[:name] + vm_doc.root.xpath( + "HISTORY_RECORDS/HISTORY[last()]/HOSTNAME").each { |e| + e.content = counters_host[:name] } - vms_fix[row[:oid]] = vm_doc.to_s + vms_fix[row[:oid]] = vm_doc.root.to_s end counters_host[:memory] += memory @@ -820,10 +855,13 @@ module OneDBFsck end end - vms_fix.each do |id, body| - @db[:vm_pool].where(:oid => id).update(:body => body) + @db.transaction do + vms_fix.each do |id, body| + @db[:vm_pool].where(:oid => id).update(:body => body) + end end + log_time() ######################################################################## # Hosts @@ -842,72 +880,82 @@ module OneDBFsck "cid INTEGER, UNIQUE(name));" # Calculate the host's xml and write them to host_pool_new - @db[:host_pool].each do |row| - host_doc = Document.new(row[:body]) + @db.transaction do + @db[:host_pool].each do |row| + host_doc = Document.new(row[:body]) - hid = row[:oid] + hid = row[:oid] - counters_host = counters[:host][hid] + counters_host = counters[:host][hid] - rvms = counters_host[:rvms].size - cpu_usage = (counters_host[:cpu]*100).to_i - mem_usage = counters_host[:memory]*1024 + rvms = counters_host[:rvms].size + cpu_usage = (counters_host[:cpu]*100).to_i + mem_usage = counters_host[:memory]*1024 - # rewrite running_vms - host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| - if e.text != rvms.to_s - log_error("Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms - end - } + # rewrite running_vms + host_doc.root.each_element("HOST_SHARE/RUNNING_VMS") {|e| + if e.text != rvms.to_s + log_error( + "Host #{hid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } - # re-do list of VM IDs - vms_elem = host_doc.root.elements.delete("VMS") + # re-do list of VM IDs + vms_elem = host_doc.root.elements.delete("VMS") - vms_new_elem = host_doc.root.add_element("VMS") + vms_new_elem = host_doc.root.add_element("VMS") - counters_host[:rvms].each do |id| - id_elem = vms_elem.elements.delete("ID[.=#{id}]") + counters_host[:rvms].each do |id| + id_elem = vms_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("VM #{id} is missing from Host #{hid} VM id list") + if id_elem.nil? 
+ log_error( + "VM #{id} is missing from Host #{hid} VM id list") + end + + vms_new_elem.add_element("ID").text = id.to_s end - vms_new_elem.add_element("ID").text = id.to_s + vms_elem.each_element("ID") do |id_elem| + log_error( + "VM #{id_elem.text} is in Host #{hid} VM id list, "<< + "but it should not") + end + + + # rewrite cpu + host_doc.root.each_element("HOST_SHARE/CPU_USAGE") {|e| + if e.text != cpu_usage.to_s + log_error( + "Host #{hid} CPU_USAGE has #{e.text} "<< + "\tis\t#{cpu_usage}") + e.text = cpu_usage + end + } + + # rewrite memory + host_doc.root.each_element("HOST_SHARE/MEM_USAGE") {|e| + if e.text != mem_usage.to_s + log_error("Host #{hid} MEM_USAGE has #{e.text} "<< + "\tis\t#{mem_usage}") + e.text = mem_usage + end + } + + row[:body] = host_doc.to_s + + # commit + @db[:host_pool_new].insert(row) end - - vms_elem.each_element("ID") do |id_elem| - log_error("VM #{id_elem.text} is in Host #{hid} VM id list, but it should not") - end - - - # rewrite cpu - host_doc.root.each_element("HOST_SHARE/CPU_USAGE") {|e| - if e.text != cpu_usage.to_s - log_error("Host #{hid} CPU_USAGE has #{e.text} \tis\t#{cpu_usage}") - e.text = cpu_usage - end - } - - # rewrite memory - host_doc.root.each_element("HOST_SHARE/MEM_USAGE") {|e| - if e.text != mem_usage.to_s - log_error("Host #{hid} MEM_USAGE has #{e.text} \tis\t#{mem_usage}") - e.text = mem_usage - end - } - - row[:body] = host_doc.to_s - - # commit - @db[:host_pool_new].insert(row) end # Rename table @db.run("DROP TABLE host_pool") @db.run("ALTER TABLE host_pool_new RENAME TO host_pool") + log_time() ######################################################################## # Image @@ -926,122 +974,124 @@ module OneDBFsck # Create a new empty table where we will store the new calculated values @db.run "CREATE TABLE image_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" - # Calculate the host's xml and write them to host_pool_new - @db[:image_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:image_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - persistent = ( doc.root.get_text('PERSISTENT').to_s == "1" ) - current_state = doc.root.get_text('STATE').to_s.to_i + persistent = ( doc.root.get_text('PERSISTENT').to_s == "1" ) + current_state = doc.root.get_text('STATE').to_s.to_i - rvms = counters[:image][oid][:vms].size - n_cloning_ops = counters[:image][oid][:clones].size + rvms = counters[:image][oid][:vms].size + n_cloning_ops = counters[:image][oid][:clones].size - # rewrite running_vms - doc.root.each_element("RUNNING_VMS") {|e| - if e.text != rvms.to_s - log_error("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") - e.text = rvms - end - } + # rewrite running_vms + doc.root.each_element("RUNNING_VMS") {|e| + if e.text != rvms.to_s + log_error("Image #{oid} RUNNING_VMS has #{e.text} \tis\t#{rvms}") + e.text = rvms + end + } - # re-do list of VM IDs - vms_elem = doc.root.elements.delete("VMS") + # re-do list of VM IDs + vms_elem = doc.root.elements.delete("VMS") - vms_new_elem = doc.root.add_element("VMS") + vms_new_elem = doc.root.add_element("VMS") - counters[:image][oid][:vms].each do |id| - id_elem = vms_elem.elements.delete("ID[.=#{id}]") + counters[:image][oid][:vms].each do |id| + id_elem = vms_elem.elements.delete("ID[.=#{id}]") - if id_elem.nil? - log_error("VM #{id} is missing from Image #{oid} VM id list") + if id_elem.nil? 
+ log_error("VM #{id} is missing from Image #{oid} VM id list") + end + + vms_new_elem.add_element("ID").text = id.to_s end - vms_new_elem.add_element("ID").text = id.to_s + vms_elem.each_element("ID") do |id_elem| + log_error("VM #{id_elem.text} is in Image #{oid} VM id list, but it should not") + end + + + if ( persistent && rvms > 0 ) + n_cloning_ops = 0 + counters[:image][oid][:clones] = Set.new + end + + # Check number of clones + doc.root.each_element("CLONING_OPS") { |e| + if e.text != n_cloning_ops.to_s + log_error("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") + e.text = n_cloning_ops + end + } + + # re-do list of Images cloning this one + clones_elem = doc.root.elements.delete("CLONES") + + clones_new_elem = doc.root.add_element("CLONES") + + counters[:image][oid][:clones].each do |id| + id_elem = clones_elem.elements.delete("ID[.=#{id}]") + + if id_elem.nil? + log_error("Image #{id} is missing from Image #{oid} CLONES id list") + end + + clones_new_elem.add_element("ID").text = id.to_s + end + + clones_elem.each_element("ID") do |id_elem| + log_error("Image #{id_elem.text} is in Image #{oid} CLONES id list, but it should not") + end + + + # Check state + + state = current_state + + if persistent + if ( rvms > 0 ) + state = 8 # USED_PERS + elsif ( n_cloning_ops > 0 ) + state = 6 # CLONE + elsif ( current_state == 8 || current_state == 6 ) + # rvms == 0 && n_cloning_ops == 0, but image is in state + # USED_PERS or CLONE + + state = 1 # READY + end + else + if ( rvms > 0 || n_cloning_ops > 0 ) + state = 2 # USED + elsif ( current_state == 2 ) + # rvms == 0 && n_cloning_ops == 0, but image is in state + # USED + + state = 1 # READY + end + end + + doc.root.each_element("STATE") { |e| + if e.text != state.to_s + log_error("Image #{oid} has STATE #{IMAGE_STATES[e.text.to_i]} \tis\t#{IMAGE_STATES[state]}") + e.text = state + end + } + + row[:body] = doc.to_s + + # commit + @db[:image_pool_new].insert(row) end - - vms_elem.each_element("ID") do |id_elem| - log_error("VM #{id_elem.text} is in Image #{oid} VM id list, but it should not") - end - - - if ( persistent && rvms > 0 ) - n_cloning_ops = 0 - counters[:image][oid][:clones] = Set.new - end - - # Check number of clones - doc.root.each_element("CLONING_OPS") { |e| - if e.text != n_cloning_ops.to_s - log_error("Image #{oid} CLONING_OPS has #{e.text} \tis\t#{n_cloning_ops}") - e.text = n_cloning_ops - end - } - - # re-do list of Images cloning this one - clones_elem = doc.root.elements.delete("CLONES") - - clones_new_elem = doc.root.add_element("CLONES") - - counters[:image][oid][:clones].each do |id| - id_elem = clones_elem.elements.delete("ID[.=#{id}]") - - if id_elem.nil? 
- log_error("Image #{id} is missing from Image #{oid} CLONES id list") - end - - clones_new_elem.add_element("ID").text = id.to_s - end - - clones_elem.each_element("ID") do |id_elem| - log_error("Image #{id_elem.text} is in Image #{oid} CLONES id list, but it should not") - end - - - # Check state - - state = current_state - - if persistent - if ( rvms > 0 ) - state = 8 # USED_PERS - elsif ( n_cloning_ops > 0 ) - state = 6 # CLONE - elsif ( current_state == 8 || current_state == 6 ) - # rvms == 0 && n_cloning_ops == 0, but image is in state - # USED_PERS or CLONE - - state = 1 # READY - end - else - if ( rvms > 0 || n_cloning_ops > 0 ) - state = 2 # USED - elsif ( current_state == 2 ) - # rvms == 0 && n_cloning_ops == 0, but image is in state - # USED - - state = 1 # READY - end - end - - doc.root.each_element("STATE") { |e| - if e.text != state.to_s - log_error("Image #{oid} has STATE #{IMAGE_STATES[e.text.to_i]} \tis\t#{IMAGE_STATES[state]}") - e.text = state - end - } - - row[:body] = doc.to_s - - # commit - @db[:image_pool_new].insert(row) end # Rename table @db.run("DROP TABLE image_pool") @db.run("ALTER TABLE image_pool_new RENAME TO image_pool") + log_time() ######################################################################## # VNet @@ -1051,107 +1101,111 @@ module OneDBFsck @db.run "CREATE TABLE leases_new (oid INTEGER, ip BIGINT, body MEDIUMTEXT, PRIMARY KEY(oid,ip));" - @db[:leases].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:leases].each do |row| + doc = Nokogiri::XML(row[:body]) - used = (doc.root.get_text('USED') == "1") - vid = doc.root.get_text('VID').to_s.to_i + used = (doc.root.at_xpath('USED').text == "1") + vid = doc.root.at_xpath('VID').text.to_i - ip_str = IPAddr.new(row[:ip], Socket::AF_INET).to_s + ip_str = IPAddr.new(row[:ip], Socket::AF_INET).to_s - vnet_structure = counters[:vnet][row[:oid]] + vnet_structure = counters[:vnet][row[:oid]] - if vnet_structure.nil? - log_error("Table leases contains the lease #{ip_str} for VNet #{row[:oid]}, but it does not exit") + if vnet_structure.nil? + log_error("Table leases contains the lease #{ip_str} "<< + "for VNet #{row[:oid]}, but it does not exit") - next - end - - ranged = vnet_structure[:type] == 0 - - counter_mac, counter_used, counter_vid = - vnet_structure[:leases][ip_str] - - vnet_structure[:leases].delete(ip_str) - - insert = true - - if used && (vid != -1) # Lease used by a VM - if counter_mac.nil? - log_error("VNet #{row[:oid]} has used lease #{ip_str} (VM #{vid}) \tbut it is free") - - if ranged - insert = false - end - - doc.root.each_element("USED") { |e| - e.text = "0" - } - - doc.root.each_element("VID") {|e| - e.text = "-1" - } - - row[:body] = doc.to_s - - elsif vid != counter_vid - log_error("VNet #{row[:oid]} has used lease #{ip_str} (VM #{vid}) \tbut it used by VM #{counter_vid}") - - doc.root.each_element("VID") {|e| - e.text = counter_vid.to_s - } - - row[:body] = doc.to_s + next end - else # Lease is free or on hold (used=1, vid=-1) - if !counter_mac.nil? - if used - log_error("VNet #{row[:oid]} has lease on hold #{ip_str} \tbut it is used by VM #{counter_vid}") - else - log_error("VNet #{row[:oid]} has free lease #{ip_str} \tbut it is used by VM #{counter_vid}") + + ranged = vnet_structure[:type] == 0 + + counter_mac, counter_used, counter_vid = + vnet_structure[:leases][ip_str] + + vnet_structure[:leases].delete(ip_str) + + insert = true + + if used && (vid != -1) # Lease used by a VM + if counter_mac.nil? 
+ log_error( + "VNet #{row[:oid]} has used lease #{ip_str} "<< + "(VM #{vid}) \tbut it is free") + + if ranged + insert = false + end + + doc.root.at_xpath("USED").content = "0" + + doc.root.at_xpath("VID").content = "-1" + + row[:body] = doc.root.to_s + + elsif vid != counter_vid + log_error( + "VNet #{row[:oid]} has used lease #{ip_str} "<< + "(VM #{vid}) \tbut it used by VM #{counter_vid}") + + doc.root.at_xpath("VID").content = counter_vid.to_s + + row[:body] = doc.root.to_s end + else # Lease is free or on hold (used=1, vid=-1) + if !counter_mac.nil? + if used + log_error( + "VNet #{row[:oid]} has lease on hold #{ip_str} "<< + "\tbut it is used by VM #{counter_vid}") + else + log_error( + "VNet #{row[:oid]} has free lease #{ip_str} "<< + "\tbut it is used by VM #{counter_vid}") + end - doc.root.each_element("USED") { |e| - e.text = "1" - } + doc.root.at_xpath("USED").content = "1" - doc.root.each_element("VID") {|e| - e.text = counter_vid.to_s - } + doc.root.at_xpath("VID").content = counter_vid.to_s - row[:body] = doc.to_s + row[:body] = doc.root.to_s + end end - end - if (doc.root.get_text('USED') == "1") - vnet_structure[:total_leases] += 1 - end + if (doc.root.at_xpath('USED').text == "1") + vnet_structure[:total_leases] += 1 + end - # commit - @db[:leases_new].insert(row) if insert + # commit + @db[:leases_new].insert(row) if insert + end end + log_time() + # Now insert all the leases left in the hash, i.e. used by a VM in # vm_pool, but not in the leases table. This will only happen in # ranged networks + @db.transaction do + counters[:vnet].each do |net_id,vnet_structure| + vnet_structure[:leases].each do |ip,array| + mac,used,vid = array - counters[:vnet].each do |net_id,vnet_structure| - vnet_structure[:leases].each do |ip,array| - mac,used,vid = array + ip_i = IPAddr.new(ip, Socket::AF_INET).to_i - ip_i = IPAddr.new(ip, Socket::AF_INET).to_i + # TODO: MAC_PREFIX is now hardcoded to "02:00" + body = "#{ip_i}512#{ip_i}#{used}#{vid}" - # TODO: MAC_PREFIX is now hardcoded to "02:00" - body = "#{ip_i}512#{ip_i}#{used}#{vid}" + log_error("VNet #{net_id} has free lease #{ip} \tbut it is used by VM #{vid}") - log_error("VNet #{net_id} has free lease #{ip} \tbut it is used by VM #{vid}") + vnet_structure[:total_leases] += 1 - vnet_structure[:total_leases] += 1 - - @db[:leases_new].insert( - :oid => net_id, - :ip => ip_i, - :body => body) + @db[:leases_new].insert( + :oid => net_id, + :ip => ip_i, + :body => body) + end end end @@ -1160,6 +1214,7 @@ module OneDBFsck @db.run("DROP TABLE leases") @db.run("ALTER TABLE leases_new RENAME TO leases") + log_time() ######################################################################## # VNet @@ -1170,31 +1225,34 @@ module OneDBFsck # Create a new empty table where we will store the new calculated values @db.run "CREATE TABLE network_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, cid INTEGER, UNIQUE(name,uid));" - @db[:network_pool].each do |row| - doc = Document.new(row[:body]) + @db.transaction do + @db[:network_pool].each do |row| + doc = Document.new(row[:body]) - oid = row[:oid] + oid = row[:oid] - total_leases = counters[:vnet][oid][:total_leases] + total_leases = counters[:vnet][oid][:total_leases] - # rewrite running_vms - doc.root.each_element("TOTAL_LEASES") {|e| - if e.text != total_leases.to_s - log_error("VNet #{oid} TOTAL_LEASES has #{e.text} \tis\t#{total_leases}") - e.text = total_leases - end - } + # rewrite running_vms + 
doc.root.each_element("TOTAL_LEASES") {|e| + if e.text != total_leases.to_s + log_error("VNet #{oid} TOTAL_LEASES has #{e.text} \tis\t#{total_leases}") + e.text = total_leases + end + } - row[:body] = doc.to_s + row[:body] = doc.to_s - # commit - @db[:network_pool_new].insert(row) + # commit + @db[:network_pool_new].insert(row) + end end # Rename table @db.run("DROP TABLE network_pool") @db.run("ALTER TABLE network_pool_new RENAME TO network_pool") + log_time() ######################################################################## # Users @@ -1205,29 +1263,32 @@ module OneDBFsck @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - # oneadmin does not have quotas - @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| - @db[:user_pool].insert(row) - end + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| + @db[:user_pool].insert(row) + end - @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) + @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) - calculate_quotas(doc, "uid=#{row[:oid]}", "User") + calculate_quotas(doc, "uid=#{row[:oid]}", "User") - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end end @db.run "DROP TABLE old_user_pool;" + log_time() ######################################################################## # Groups @@ -1238,29 +1299,33 @@ module OneDBFsck @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert(row) - end + @db.transaction do + # oneadmin group does not have quotas + @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| + @db[:group_pool].insert(row) + end - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = Document.new(row[:body]) + @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| + doc = Nokogiri::XML(row[:body]) - calculate_quotas(doc, "gid=#{row[:oid]}", "Group") + calculate_quotas(doc, "gid=#{row[:oid]}", "Group") - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end end @db.run "DROP TABLE old_group_pool;" + log_time() + log_total_errors() return true @@ -1280,7 +1345,7 @@ module OneDBFsck def calculate_quotas(doc, where_filter, resource) - oid = 
doc.root.get_text("ID").to_s.to_i + oid = doc.root.at_xpath("ID").text.to_i # VM quotas cpu_used = 0.0 @@ -1295,29 +1360,29 @@ module OneDBFsck img_usage = {} @db.fetch("SELECT body FROM vm_pool WHERE #{where_filter} AND state<>6") do |vm_row| - vmdoc = Document.new(vm_row[:body]) + vmdoc = Nokogiri::XML(vm_row[:body]) # VM quotas - vmdoc.root.each_element("TEMPLATE/CPU") { |e| + vmdoc.root.xpath("TEMPLATE/CPU").each { |e| # truncate to 2 decimals cpu = (e.text.to_f * 100).to_i / 100.0 cpu_used += cpu cpu_used = (cpu_used * 100).to_i / 100.0 } - vmdoc.root.each_element("TEMPLATE/MEMORY") { |e| + vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e| mem_used += e.text.to_i } - vmdoc.root.each_element("TEMPLATE/DISK") { |e| + vmdoc.root.xpath("TEMPLATE/DISK").each { |e| type = "" - e.each_element("TYPE") { |t_elem| + e.xpath("TYPE").each { |t_elem| type = t_elem.text.upcase } if ( type == "SWAP" || type == "FS") - e.each_element("SIZE") { |size_elem| + e.xpath("SIZE").each { |size_elem| vol_used += size_elem.text.to_i } end @@ -1326,13 +1391,13 @@ module OneDBFsck vms_used += 1 # VNet quotas - vmdoc.root.each_element("TEMPLATE/NIC/NETWORK_ID") { |e| + vmdoc.root.xpath("TEMPLATE/NIC/NETWORK_ID").each { |e| vnet_usage[e.text] = 0 if vnet_usage[e.text].nil? vnet_usage[e.text] += 1 } # Image quotas - vmdoc.root.each_element("TEMPLATE/DISK/IMAGE_ID") { |e| + vmdoc.root.xpath("TEMPLATE/DISK/IMAGE_ID").each { |e| img_usage[e.text] = 0 if img_usage[e.text].nil? img_usage[e.text] += 1 } @@ -1342,29 +1407,29 @@ module OneDBFsck # VM quotas vm_elem = nil - doc.root.each_element("VM_QUOTA/VM") { |e| vm_elem = e } + doc.root.xpath("VM_QUOTA/VM").each { |e| vm_elem = e } if vm_elem.nil? - doc.root.delete_element("VM_QUOTA") + doc.root.xpath("VM_QUOTA").each { |e| e.remove } - vm_quota = doc.root.add_element("VM_QUOTA") - vm_elem = vm_quota.add_element("VM") + vm_quota = doc.root.add_child(doc.create_element("VM_QUOTA")) + vm_elem = vm_quota.add_child(doc.create_element("VM")) - vm_elem.add_element("CPU").text = "-1" - vm_elem.add_element("CPU_USED").text = "0" + vm_elem.add_child(doc.create_element("CPU")).content = "-1" + vm_elem.add_child(doc.create_element("CPU_USED")).content = "0" - vm_elem.add_element("MEMORY").text = "-1" - vm_elem.add_element("MEMORY_USED").text = "0" + vm_elem.add_child(doc.create_element("MEMORY")).content = "-1" + vm_elem.add_child(doc.create_element("MEMORY_USED")).content = "0" - vm_elem.add_element("VMS").text = "-1" - vm_elem.add_element("VMS_USED").text = "0" + vm_elem.add_child(doc.create_element("VMS")).content = "-1" + vm_elem.add_child(doc.create_element("VMS_USED")).content = "0" - vm_elem.add_element("VOLATILE_SIZE").text = "-1" - vm_elem.add_element("VOLATILE_SIZE_USED").text = "0" + vm_elem.add_child(doc.create_element("VOLATILE_SIZE")).content = "-1" + vm_elem.add_child(doc.create_element("VOLATILE_SIZE_USED")).content = "0" end - vm_elem.each_element("CPU_USED") { |e| + vm_elem.xpath("CPU_USED").each { |e| # Because of bug http://dev.opennebula.org/issues/1567 the element # may contain a float number in scientific notation. 
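# -- Editorial aside, not part of the patch --------------------------------
# Illustration of the scientific-notation problem mentioned in the comment
# above: a CPU_USED value left behind by bug #1567 cannot be compared
# literally against the recomputed value, only numerically (sample values
# are illustrative assumptions):

stored   = "1.5e+01"            # CPU_USED as written by the bug
computed = 15.0                 # value recalculated from the VM pool

stored == computed.to_s         #=> false  (raw string comparison misleads)
stored.to_f == computed         #=> true   (numeric comparison is what counts)
"%.2f" % stored.to_f            #=> "15.00" (normalized form to write back)
# ---------------------------------------------------------------------------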
@@ -1379,51 +1444,51 @@ module OneDBFsck if different log_error("#{resource} #{oid} quotas: CPU_USED has #{e.text} \tis\t#{cpu_used_str}") - e.text = cpu_used_str + e.content = cpu_used_str end } - vm_elem.each_element("MEMORY_USED") { |e| + vm_elem.xpath("MEMORY_USED").each { |e| if e.text != mem_used.to_s log_error("#{resource} #{oid} quotas: MEMORY_USED has #{e.text} \tis\t#{mem_used}") - e.text = mem_used.to_s + e.content = mem_used.to_s end } - vm_elem.each_element("VMS_USED") { |e| + vm_elem.xpath("VMS_USED").each { |e| if e.text != vms_used.to_s log_error("#{resource} #{oid} quotas: VMS_USED has #{e.text} \tis\t#{vms_used}") - e.text = vms_used.to_s + e.content = vms_used.to_s end } - vm_elem.each_element("VOLATILE_SIZE_USED") { |e| + vm_elem.xpath("VOLATILE_SIZE_USED").each { |e| if e.text != vol_used.to_s log_error("#{resource} #{oid} quotas: VOLATILE_SIZE_USED has #{e.text} \tis\t#{vol_used}") - e.text = vol_used.to_s + e.content = vol_used.to_s end } # VNet quotas net_quota = nil - doc.root.each_element("NETWORK_QUOTA") { |e| net_quota = e } + doc.root.xpath("NETWORK_QUOTA").each { |e| net_quota = e } if net_quota.nil? - net_quota = doc.root.add_element("NETWORK_QUOTA") + net_quota = doc.root.add_child(doc.create_element("NETWORK_QUOTA")) end - net_quota.each_element("NETWORK") { |net_elem| - vnet_id = net_elem.get_text("ID").to_s + net_quota.xpath("NETWORK").each { |net_elem| + vnet_id = net_elem.at_xpath("ID").text leases_used = vnet_usage.delete(vnet_id) leases_used = 0 if leases_used.nil? - net_elem.each_element("LEASES_USED") { |e| + net_elem.xpath("LEASES_USED").each { |e| if e.text != leases_used.to_s log_error("#{resource} #{oid} quotas: VNet #{vnet_id}\tLEASES_USED has #{e.text} \tis\t#{leases_used}") - e.text = leases_used.to_s + e.content = leases_used.to_s end } } @@ -1431,34 +1496,34 @@ module OneDBFsck vnet_usage.each { |vnet_id, leases_used| log_error("#{resource} #{oid} quotas: VNet #{vnet_id}\tLEASES_USED has 0 \tis\t#{leases_used}") - new_elem = net_quota.add_element("NETWORK") + new_elem = net_quota.add_child(doc.create_element("NETWORK")) - new_elem.add_element("ID").text = vnet_id - new_elem.add_element("LEASES").text = "-1" - new_elem.add_element("LEASES_USED").text = leases_used.to_s + new_elem.add_child(doc.create_element("ID")).content = vnet_id + new_elem.add_child(doc.create_element("LEASES")).content = "-1" + new_elem.add_child(doc.create_element("LEASES_USED")).content = leases_used.to_s } # Image quotas img_quota = nil - doc.root.each_element("IMAGE_QUOTA") { |e| img_quota = e } + doc.root.xpath("IMAGE_QUOTA").each { |e| img_quota = e } if img_quota.nil? - img_quota = doc.root.add_element("IMAGE_QUOTA") + img_quota = doc.root.add_child(doc.create_element("IMAGE_QUOTA")) end - img_quota.each_element("IMAGE") { |img_elem| - img_id = img_elem.get_text("ID").to_s + img_quota.xpath("IMAGE").each { |img_elem| + img_id = img_elem.at_xpath("ID").text rvms = img_usage.delete(img_id) rvms = 0 if rvms.nil? 
- img_elem.each_element("RVMS_USED") { |e| + img_elem.xpath("RVMS_USED").each { |e| if e.text != rvms.to_s log_error("#{resource} #{oid} quotas: Image #{img_id}\tRVMS has #{e.text} \tis\t#{rvms}") - e.text = rvms.to_s + e.content = rvms.to_s end } } @@ -1466,11 +1531,11 @@ module OneDBFsck img_usage.each { |img_id, rvms| log_error("#{resource} #{oid} quotas: Image #{img_id}\tRVMS has 0 \tis\t#{rvms}") - new_elem = img_quota.add_element("IMAGE") + new_elem = img_quota.add_child(doc.create_element("IMAGE")) - new_elem.add_element("ID").text = img_id - new_elem.add_element("RVMS").text = "-1" - new_elem.add_element("RVMS_USED").text = rvms.to_s + new_elem.add_child(doc.create_element("ID")).content = img_id + new_elem.add_child(doc.create_element("RVMS")).content = "-1" + new_elem.add_child(doc.create_element("RVMS_USED")).content = rvms.to_s } @@ -1479,44 +1544,44 @@ module OneDBFsck ds_usage = {} @db.fetch("SELECT body FROM image_pool WHERE #{where_filter}") do |img_row| - img_doc = Document.new(img_row[:body]) + img_doc = Nokogiri::XML(img_row[:body]) - img_doc.root.each_element("DATASTORE_ID") { |e| + img_doc.root.xpath("DATASTORE_ID").each { |e| ds_usage[e.text] = [0,0] if ds_usage[e.text].nil? ds_usage[e.text][0] += 1 - img_doc.root.each_element("SIZE") { |size| + img_doc.root.xpath("SIZE").each { |size| ds_usage[e.text][1] += size.text.to_i } } end ds_quota = nil - doc.root.each_element("DATASTORE_QUOTA") { |e| ds_quota = e } + doc.root.xpath("DATASTORE_QUOTA").each { |e| ds_quota = e } if ds_quota.nil? - ds_quota = doc.root.add_element("DATASTORE_QUOTA") + ds_quota = doc.root.add_child(doc.create_element("DATASTORE_QUOTA")) end - ds_quota.each_element("DATASTORE") { |ds_elem| - ds_id = ds_elem.get_text("ID").to_s + ds_quota.xpath("DATASTORE").each { |ds_elem| + ds_id = ds_elem.at_xpath("ID").text images_used,size_used = ds_usage.delete(ds_id) images_used = 0 if images_used.nil? size_used = 0 if size_used.nil? 
- ds_elem.each_element("IMAGES_USED") { |e| + ds_elem.xpath("IMAGES_USED").each { |e| if e.text != images_used.to_s log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tIMAGES_USED has #{e.text} \tis\t#{images_used}") - e.text = images_used.to_s + e.content = images_used.to_s end } - ds_elem.each_element("SIZE_USED") { |e| + ds_elem.xpath("SIZE_USED").each { |e| if e.text != size_used.to_s log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tSIZE_USED has #{e.text} \tis\t#{size_used}") - e.text = size_used.to_s + e.content = size_used.to_s end } } @@ -1527,15 +1592,15 @@ module OneDBFsck log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tIMAGES_USED has 0 \tis\t#{images_used}") log_error("#{resource} #{oid} quotas: Datastore #{ds_id}\tSIZE_USED has 0 \tis\t#{size_used}") - new_elem = ds_quota.add_element("DATASTORE") + new_elem = ds_quota.add_child(doc.create_element("DATASTORE")) - new_elem.add_element("ID").text = ds_id + new_elem.add_child(doc.create_element("ID")).content = ds_id - new_elem.add_element("IMAGES").text = "-1" - new_elem.add_element("IMAGES_USED").text = images_used.to_s + new_elem.add_child(doc.create_element("IMAGES")).content = "-1" + new_elem.add_child(doc.create_element("IMAGES_USED")).content = images_used.to_s - new_elem.add_element("SIZE").text = "-1" - new_elem.add_element("SIZE_USED").text = size_used.to_s + new_elem.add_child(doc.create_element("SIZE")).content = "-1" + new_elem.add_child(doc.create_element("SIZE_USED")).content = size_used.to_s } end end diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index 1b59e7ecf9..3989d05cce 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -229,6 +229,8 @@ class OneDB begin puts " > Running fsck" if ops[:verbose] + time0 = Time.now + result = @backend.fsck if !result @@ -238,6 +240,12 @@ class OneDB puts " > Done" if ops[:verbose] puts "" if ops[:verbose] + time1 = Time.now + + if LOG_TIME + puts " > Total time: #{time1 - time0}s" if ops[:verbose] + end + return 0 rescue Exception => e puts e.message From f31c6a7c146a7b7440ccc1e471db768c132537b6 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 17 Feb 2014 19:27:58 +0100 Subject: [PATCH 24/80] feature #1762: add APIC option to sunstone --- src/sunstone/public/js/plugins/templates-tab.js | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/sunstone/public/js/plugins/templates-tab.js b/src/sunstone/public/js/plugins/templates-tab.js index b42fe88b5b..20496b9b54 100644 --- a/src/sunstone/public/js/plugins/templates-tab.js +++ b/src/sunstone/public/js/plugins/templates-tab.js @@ -2655,6 +2655,21 @@ function add_osTab(dialog) { '
'+tr("Add support in the VM for Physical Address Extension (PAE)")+'
'+ '
'+ '
'+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + '
'+tr("Enables the advanced programmable IRQ management.")+'
'+ + '
'+ + '
'+ '
'+ '
'+ ''+ From 2e1aba520aff2e3e1114199588838a2a36d0b9b0 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 18 Feb 2014 10:34:33 +0100 Subject: [PATCH 25/80] feature #2547: add localtime to the template wizard --- src/sunstone/public/js/plugins/templates-tab.js | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/sunstone/public/js/plugins/templates-tab.js b/src/sunstone/public/js/plugins/templates-tab.js index 20496b9b54..0a70117b12 100644 --- a/src/sunstone/public/js/plugins/templates-tab.js +++ b/src/sunstone/public/js/plugins/templates-tab.js @@ -2685,6 +2685,21 @@ function add_osTab(dialog) { '
'+tr("Add support in the VM for hyper-v features (HYPERV)")+'
'+ '
'+ '
'+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + '
'+tr("The guest clock will be synchronized to the host's configured timezone when booted.")+'
'+ + '
'+ + '
'+ ''+ '
'+ '
'+ From dbc6120c1d61620aa074381f56bf17d244a88eab Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 18 Feb 2014 10:49:23 +0100 Subject: [PATCH 26/80] feature #1762: add device_model to template wizard --- src/sunstone/public/js/plugins/templates-tab.js | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/sunstone/public/js/plugins/templates-tab.js b/src/sunstone/public/js/plugins/templates-tab.js index 0a70117b12..0217de66c8 100644 --- a/src/sunstone/public/js/plugins/templates-tab.js +++ b/src/sunstone/public/js/plugins/templates-tab.js @@ -2700,6 +2700,17 @@ function add_osTab(dialog) { '
'+tr("The guest clock will be synchronized to the host's configured timezone when booted.")+'
'+ '
'+ '
'+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + ''+ + '
'+ + '
'+ + '
'+tr("Used to change the IO emulator in Xen HVM. Only XEN.")+'
'+ + '
'+ + '
'+ ''+ '
'+ '
'+ From 4797588156621e7644c205a57e54a4c7d58edc3c Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 18 Feb 2014 12:18:09 +0100 Subject: [PATCH 27/80] feature #2685: add describe and list options to oneacct --- src/cli/oneacct | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/cli/oneacct b/src/cli/oneacct index 0aca436c03..6573ec879f 100755 --- a/src/cli/oneacct +++ b/src/cli/oneacct @@ -44,9 +44,15 @@ cmd = CommandParser::CmdParser.new(ARGV) do end option AcctHelper::ACCT_OPTIONS + CommandParser::OPTIONS + + [OpenNebulaHelper::DESCRIBE, CLIHelper::LIST] + OpenNebulaHelper::CLIENT_OPTIONS main do + if options[:describe] + AcctHelper::ACCT_TABLE.describe_columns + exit(0) + end + filter_flag = (options[:userfilter] || VirtualMachinePool::INFO_ALL) start_time = options[:start_time] ? options[:start_time].to_i : -1 end_time = options[:end_time] ? options[:end_time].to_i : -1 @@ -102,13 +108,13 @@ cmd = CommandParser::CmdParser.new(ARGV) do # Use one table for each VM value.each { |vm_id, history_array| array = history_array['HISTORY_RECORDS']['HISTORY'] - AcctHelper::ACCT_TABLE.show(array) + AcctHelper::ACCT_TABLE.show(array, options) puts } else # Use the same table for all the VMs array = value['HISTORY_RECORDS']['HISTORY'] - AcctHelper::ACCT_TABLE.show(array) + AcctHelper::ACCT_TABLE.show(array, options) puts end } From e18be2d51775fe3c3de143707ad3e3eeeedbccc3 Mon Sep 17 00:00:00 2001 From: Jaime Melis Date: Tue, 18 Feb 2014 12:43:38 +0100 Subject: [PATCH 28/80] Bug #2729: Race condition in hash calculation in downloader.sh --- src/datastore_mad/remotes/downloader.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/datastore_mad/remotes/downloader.sh b/src/datastore_mad/remotes/downloader.sh index b387bd2d98..d5be9ae5c9 100755 --- a/src/datastore_mad/remotes/downloader.sh +++ b/src/datastore_mad/remotes/downloader.sh @@ -188,8 +188,10 @@ esac file_type=$(get_type "$command") decompressor=$(get_decompressor "$file_type") +# Note: the 'cat' at the end of this pipeline forces the pipe to wait until +# all the 'tee' processes are finished $command | tee >( decompress "$decompressor" "$TO" ) \ - >( hasher $HASH_TYPE ) >/dev/null + >( hasher $HASH_TYPE ) | cat >/dev/null if [ "$?" != "0" ]; then echo "Error copying" >&2 From dfb052b9ed04f2e18dc225c7b0f7d684542388f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 18 Feb 2014 17:13:24 +0100 Subject: [PATCH 29/80] Add onedb migrator to 4.5.80 --- install.sh | 1 + src/onedb/4.4.1_to_4.5.80.rb | 145 +++++++++++++++++++++++++++++++++++ 2 files changed, 146 insertions(+) create mode 100644 src/onedb/4.4.1_to_4.5.80.rb diff --git a/install.sh b/install.sh index 4d8adbf9f7..3f62f396a1 100755 --- a/install.sh +++ b/install.sh @@ -1070,6 +1070,7 @@ ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \ src/onedb/4.3.85_to_4.3.90.rb \ src/onedb/4.3.90_to_4.4.0.rb \ src/onedb/4.4.0_to_4.4.1.rb \ + src/onedb/4.4.1_to_4.5.80.rb \ src/onedb/fsck.rb \ src/onedb/import_slave.rb \ src/onedb/onedb.rb \ diff --git a/src/onedb/4.4.1_to_4.5.80.rb b/src/onedb/4.4.1_to_4.5.80.rb new file mode 100644 index 0000000000..9b27ef70ae --- /dev/null +++ b/src/onedb/4.4.1_to_4.5.80.rb @@ -0,0 +1,145 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +require 'nokogiri' + +module Migrator + def db_version + "4.5.80" + end + + def one_version + "OpenNebula 4.5.80" + end + + def up + + init_log_time() + + @db.run "ALTER TABLE acl RENAME TO old_acl;" + @db.run "CREATE TABLE acl (oid INT PRIMARY KEY, user BIGINT, resource BIGINT, rights BIGINT, zone BIGINT, UNIQUE(user, resource, rights, zone));" + + @db.transaction do + @db.fetch("SELECT * FROM old_acl") do |row| + @db[:acl].insert( + :oid => row[:oid], + :user => row[:user], + :resource => row[:resource], + :rights => row[:rights], + :zone => 4294967296) + end + end + + @db.run "DROP TABLE old_acl;" + + log_time() + + # Move USER/QUOTA to user_quotas table + + @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" + @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.run "CREATE TABLE user_quotas (user_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + @db.fetch("SELECT * FROM old_user_pool") do |row| + doc = Nokogiri::XML(row[:body]) + + quotas_doc = extract_quotas(doc) + + @db[:user_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + + @db[:user_quotas].insert( + :user_oid => row[:oid], + :body => quotas_doc.root.to_s) + end + end + + @db.run "DROP TABLE old_user_pool;" + + log_time() + + # GROUP/RESOURCE_PROVIDER is not needed + + # Move GROUP/QUOTA to group_quotas table + + @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" + @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.run "CREATE TABLE group_quotas (group_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + @db.fetch("SELECT * FROM old_group_pool") do |row| + doc = Nokogiri::XML(row[:body]) + + quotas_doc = extract_quotas(doc) + + @db[:group_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:oid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + + @db[:group_quotas].insert( + :group_oid => row[:oid], + :body => quotas_doc.root.to_s) + end + end + + @db.run "DROP TABLE old_group_pool;" + + log_time() + + # Default ZONE + @db.run "CREATE TABLE zone_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + @db.run "INSERT INTO \"zone_pool\" VALUES(0,'OpenNebula','0OpenNebula',0,0,1,0,0);" + + @db.run "INSERT INTO \"pool_control\" VALUES('zone_pool',99);" + + return true + end + + def extract_quotas(doc) + ds_quota = doc.root.at_xpath("DATASTORE_QUOTA").remove + net_quota = doc.root.at_xpath("NETWORK_QUOTA").remove + vm_quota = 
doc.root.at_xpath("VM_QUOTA").remove + img_quota = doc.root.at_xpath("IMAGE_QUOTA").remove + + quotas_doc = Nokogiri::XML("") + + quotas_doc.root.add_child(quotas_doc.create_element("ID")) + .content = doc.root.at_xpath("ID").text + + quotas_doc.root.add_child(ds_quota) + quotas_doc.root.add_child(net_quota) + quotas_doc.root.add_child(vm_quota) + quotas_doc.root.add_child(img_quota) + + return quotas_doc + end +end From 6b0971415b802b388cddc8db8d781b597ed79a8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 18 Feb 2014 17:21:38 +0100 Subject: [PATCH 30/80] Feature #2586: Better onedb upgrade time log --- src/onedb/onedb.rb | 11 +++-------- src/onedb/onedb_backend.rb | 2 +- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index 3989d05cce..fdef7f41cb 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -16,6 +16,7 @@ require 'onedb_backend' +# If set to true, extra verbose time log will be printed for each migrator LOG_TIME = false class OneDB @@ -155,16 +156,12 @@ class OneDB time1 = Time.now - if LOG_TIME - puts " > Time for #{file}: #{time1 - time0}s" - end - if !result raise "Error while upgrading from #{version} to " << " #{@backend.db_version}" end - puts " > Done" if ops[:verbose] + puts " > Done in #{"%0.02f" % (time1 - time0).to_s}s" if ops[:verbose] puts "" if ops[:verbose] matches = Dir.glob( @@ -180,9 +177,7 @@ class OneDB timeb = Time.now - if LOG_TIME - puts " > Total time: #{timeb - timea}s" if ops[:verbose] - end + puts "Total time: #{"%0.02f" % (timeb - timea).to_s}s" if ops[:verbose] return 0 diff --git a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index ba9fdf5629..315251597e 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -138,7 +138,7 @@ class OneDBBacKEnd def log_time() if LOG_TIME @time1 = Time.now - puts " > #{db_version} Time for block #{@block_n}: #{@time1 - @time0}s" + puts " > #{db_version} Time for block #{@block_n}: #{"%0.02f" % (@time1 - @time0).to_s}s" @time0 = Time.now @block_n += 1 end From e49687be51dc7c934e2a1795050c0b976e739019 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 18 Feb 2014 17:50:08 +0100 Subject: [PATCH 31/80] feature #1484: add csv output to table formater --- src/cli/cli_helper.rb | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/cli/cli_helper.rb b/src/cli/cli_helper.rb index 30e7116320..5cdb3f5bbf 100644 --- a/src/cli/cli_helper.rb +++ b/src/cli/cli_helper.rb @@ -14,6 +14,8 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # +require 'csv' + module CLIHelper LIST = { :name => "list", @@ -23,6 +25,12 @@ module CLIHelper :description => "Selects columns to display with list command" } + CSV = { + :name => "csv", + :large => "--csv", + :description => "Write table in csv format" + } + #ORDER = { # :name => "order", # :short => "-o x,y,z", @@ -56,7 +64,7 @@ module CLIHelper } #OPTIONS = [LIST, ORDER, FILTER, HEADER, DELAY] - OPTIONS = [LIST, DELAY, FILTER] + OPTIONS = [LIST, DELAY, FILTER, CSV] # Sets bold font def CLIHelper.scr_bold @@ -154,6 +162,8 @@ module CLIHelper class ShowTable require 'yaml' + attr_reader :default_columns + def initialize(conf=nil, ext=nil, &block) @columns = Hash.new @default_columns = Array.new @@ -241,7 +251,7 @@ module CLIHelper private def print_table(data, options) - CLIHelper.print_header(header_str) + CLIHelper.print_header(header_str) if !options[:csv] data ? print_data(data, options) : puts end @@ -257,17 +267,24 @@ module CLIHelper end begin - res_data.each{|l| - puts (0..ncolumns-1).collect{ |i| - dat=l[i] - col=@default_columns[i] + if options[:csv] + CSV($stdout, :write_headers => true, + :headers => @default_columns) do |csv| + res_data.each {|l| csv << l } + end + else + res_data.each{|l| + puts (0..ncolumns-1).collect{ |i| + dat=l[i] + col=@default_columns[i] - str=format_str(col, dat) - str=CLIHelper.color_state(str) if i==stat_column + str=format_str(col, dat) + str=CLIHelper.color_state(str) if i==stat_column - str - }.join(' ').rstrip - } + str + }.join(' ').rstrip + } + end rescue Errno::EPIPE end end From 308484fa54f519a49573329b10f8a8eede693f83 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 18 Feb 2014 17:52:54 +0100 Subject: [PATCH 32/80] feature #1484: merge all user data in oneacct csv --- src/cli/one_helper/oneacct_helper.rb | 4 ++++ src/cli/oneacct | 22 ++++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/cli/one_helper/oneacct_helper.rb b/src/cli/one_helper/oneacct_helper.rb index f3048e3aa5..572777bf70 100644 --- a/src/cli/one_helper/oneacct_helper.rb +++ b/src/cli/one_helper/oneacct_helper.rb @@ -100,6 +100,10 @@ class AcctHelper < OpenNebulaHelper::OneHelper ACCT_TABLE = CLIHelper::ShowTable.new("oneacct.yaml", nil) do + column :UID, "User ID", :size=>4 do |d| + d["UID"] + end + column :VID, "Virtual Machine ID", :size=>4 do |d| d["OID"] end diff --git a/src/cli/oneacct b/src/cli/oneacct index 6573ec879f..9f27f732d8 100755 --- a/src/cli/oneacct +++ b/src/cli/oneacct @@ -44,7 +44,7 @@ cmd = CommandParser::CmdParser.new(ARGV) do end option AcctHelper::ACCT_OPTIONS + CommandParser::OPTIONS + - [OpenNebulaHelper::DESCRIBE, CLIHelper::LIST] + + [OpenNebulaHelper::DESCRIBE, CLIHelper::LIST, CLIHelper::CSV] + OpenNebulaHelper::CLIENT_OPTIONS main do @@ -87,7 +87,10 @@ cmd = CommandParser::CmdParser.new(ARGV) do else order_by = Hash.new order_by[:order_by_1] = 'VM/UID' - order_by[:order_by_2] = 'VM/ID' if options[:split] + + if options[:split] && !options[:csv] + order_by[:order_by_2] = 'VM/ID' + end acct_hash = pool.accounting(filter_flag, common_opts.merge(order_by)) @@ -96,6 +99,21 @@ cmd = CommandParser::CmdParser.new(ARGV) do exit -1 end + if options[:csv] + a=Array.new + acct_hash.each do |user_id, value| + value['HISTORY_RECORDS']['HISTORY'].each do |l| + l['UID']=user_id + a << l + end + end + + cols=AcctHelper::ACCT_TABLE.default_columns + AcctHelper::ACCT_TABLE.default(:UID, *cols) + + AcctHelper::ACCT_TABLE.show(a, options) + 
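# -- Editorial aside, not part of the patch --------------------------------
# The --csv path above hands the merged accounting rows to
# CLIHelper::ShowTable, which streams them through Ruby's CSV class with a
# header line, as in the cli_helper.rb hunk earlier in this series. A
# standalone sketch of that call (column names and rows are illustrative
# assumptions):

require 'csv'

columns = [:UID, :VID, :HOSTNAME]
rows    = [[2, 41, 'node01'], [2, 42, 'node02']]

CSV($stdout, :write_headers => true, :headers => columns) do |csv|
  rows.each { |r| csv << r }
end
# ---------------------------------------------------------------------------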
exit(0) + end if ( start_time != -1 or end_time != -1 ) AcctHelper.print_start_end_time_header(start_time, end_time) From d0eb7aead4db827699d7976d914003f855932164 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 18 Feb 2014 18:45:01 +0100 Subject: [PATCH 33/80] Bug #2734: Revert "Feature #2694: Force ACL rule zone to be the local zone when oned is standalone" This reverts commit 41756a9de3ce2289908c6b91de11f1c853b6d8d9. --- include/AclManager.h | 16 +++------------- src/acl/AclManager.cc | 7 ------- src/nebula/Nebula.cc | 3 +-- 3 files changed, 4 insertions(+), 22 deletions(-) diff --git a/include/AclManager.h b/include/AclManager.h index 6a25884967..998d248d23 100644 --- a/include/AclManager.h +++ b/include/AclManager.h @@ -38,16 +38,12 @@ class AclManager : public Callbackable, public ActionListener public: /** - * * @param _db pointer to the DB * @param zone_id of the Zone - * @param is_federation_enabled true is this oned is part of a federation - * @param is_federation_slave true is this oned is a federation slave. It - * it is true, it will reload periodically rules from the DB + * @param refresh_cache will reload periodically rules from the DB * @param timer_period period to reload the rules */ - AclManager(SqlDB * _db, int zone_id, bool is_federation_enabled, - bool is_federation_slave, time_t timer); + AclManager(SqlDB * _db, int zone_id, bool _refresh_cache, time_t timer); virtual ~AclManager(); @@ -214,8 +210,7 @@ protected: * from DB) */ AclManager(int _zone_id) - :zone_id(_zone_id), db(0),lastOID(0), is_federation_enabled(false), - is_federation_slave(false) + :zone_id(_zone_id), db(0),lastOID(0), is_federation_slave(false) { pthread_mutex_init(&mutex, 0); }; @@ -420,11 +415,6 @@ private: // Refresh loop thread // ---------------------------------------- - /** - * Flag to know if this oned is part of a federation - */ - bool is_federation_enabled; - /** * Flag to refresh the cache periodically */ diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index 6a647b422f..949527ec69 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -51,11 +51,9 @@ int AclManager::init_cb(void *nil, int num, char **values, char **names) AclManager::AclManager( SqlDB * _db, int _zone_id, - bool _is_federation_enabled, bool _is_federation_slave, time_t _timer_period) :zone_id(_zone_id), db(_db), lastOID(-1), - is_federation_enabled(_is_federation_enabled), is_federation_slave(_is_federation_slave), timer_period(_timer_period) { ostringstream oss; @@ -529,11 +527,6 @@ int AclManager::add_rule(long long user, long long resource, long long rights, return -1; } - if (!is_federation_enabled) - { - zone = AclRule::INDIVIDUAL_ID | zone_id; - } - lock(); if (lastOID == INT_MAX) diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index 458f22151f..da1eb823d2 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -701,8 +701,7 @@ void Nebula::start(bool bootstrap_only) // ---- ACL Manager ---- try { - aclm = new AclManager(db, zone_id, is_federation_enabled(), - is_federation_slave(), timer_period); + aclm = new AclManager(db, zone_id, is_federation_slave(), timer_period); } catch (bad_alloc&) { From 22e621ea95edebde05b1a2901155ae45ca1615a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 18 Feb 2014 18:54:36 +0100 Subject: [PATCH 34/80] Bug #2734: ACL rules are not forced to be in the local zone But the onedb import-slave command will simply ignore any rule that applies to a zone other than #0 or * --- include/AclManager.h | 
5 +++-- src/acl/AclManager.cc | 13 ++++++++----- src/onedb/import_slave.rb | 1 + 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/include/AclManager.h b/include/AclManager.h index 998d248d23..32eb30e5d1 100644 --- a/include/AclManager.h +++ b/include/AclManager.h @@ -40,10 +40,11 @@ public: /** * @param _db pointer to the DB * @param zone_id of the Zone - * @param refresh_cache will reload periodically rules from the DB + * @param is_federation_slave true is this oned is a federation slave. If + * it is true, it will reload periodically rules from the DB * @param timer_period period to reload the rules */ - AclManager(SqlDB * _db, int zone_id, bool _refresh_cache, time_t timer); + AclManager(SqlDB * _db, int zone_id, bool is_federation_slave, time_t timer); virtual ~AclManager(); diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index 949527ec69..04c7cfcbed 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -83,7 +83,7 @@ AclManager::AclManager( string error_str; // Users in group USERS can create standard resources - // @1 VM+NET+IMAGE+TEMPLATE/* CREATE + // @1 VM+NET+IMAGE+TEMPLATE/* CREATE # add_rule(AclRule::GROUP_ID | 1, AclRule::ALL_ID | @@ -92,24 +92,27 @@ AclManager::AclManager( PoolObjectSQL::IMAGE | PoolObjectSQL::TEMPLATE, AuthRequest::CREATE, - AclRule::ALL_ID, + AclRule::INDIVIDUAL_ID | + zone_id, error_str); // Users in USERS can deploy VMs in any HOST - // @1 HOST/* MANAGE + // @1 HOST/* MANAGE # add_rule(AclRule::GROUP_ID | 1, AclRule::ALL_ID | PoolObjectSQL::HOST, AuthRequest::MANAGE, - AclRule::ALL_ID, + AclRule::INDIVIDUAL_ID | + zone_id, error_str); add_rule(AclRule::ALL_ID, AclRule::ALL_ID | PoolObjectSQL::DOCUMENT, AuthRequest::CREATE, - AclRule::ALL_ID, + AclRule::INDIVIDUAL_ID | + zone_id, error_str); } } diff --git a/src/onedb/import_slave.rb b/src/onedb/import_slave.rb index ec11ed7186..a6cbb603b5 100644 --- a/src/onedb/import_slave.rb +++ b/src/onedb/import_slave.rb @@ -499,6 +499,7 @@ EOT if (zid != 0) insert = false + error_str = "Zone ##{zid} is unknown for the slave" else new_zone = (Acl::USERS["UID"] | zone_id) end From def8fa4490fe19dfd1039be5dbd56173f933b82b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Wed, 19 Feb 2014 15:48:36 +0100 Subject: [PATCH 35/80] Feature #2565: Make all zones visible by default to all users --- src/acl/AclManager.cc | 8 ++++++++ src/rm/RequestManagerGroup.cc | 30 ------------------------------ 2 files changed, 8 insertions(+), 30 deletions(-) diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index 04c7cfcbed..fd408d29d8 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -114,6 +114,14 @@ AclManager::AclManager( AclRule::INDIVIDUAL_ID | zone_id, error_str); + + // @ ZONE/# USE * + add_rule(AclRule::ALL_ID, + AclRule::ALL_ID | + PoolObjectSQL::ZONE, + AuthRequest::USE, + AclRule::ALL_ID, + error_str); } } diff --git a/src/rm/RequestManagerGroup.cc b/src/rm/RequestManagerGroup.cc index 785c3d1d6b..0dc58dea44 100644 --- a/src/rm/RequestManagerGroup.cc +++ b/src/rm/RequestManagerGroup.cc @@ -261,21 +261,6 @@ int GroupAddProvider::edit_acl_rules( error_msg); - // @ ZONE/# USE * - rc += aclm->add_rule( - AclRule::GROUP_ID | - group_id, - - PoolObjectSQL::ZONE | - AclRule::INDIVIDUAL_ID | - zone_id, - - AuthRequest::USE, - - AclRule::ALL_ID, - - error_msg); - if (rc != 0) { return -1; @@ -343,21 +328,6 @@ int GroupDelProvider::edit_acl_rules( error_msg); - // @ ZONE/# USE * - rc += aclm->del_rule( - AclRule::GROUP_ID | - group_id, - - 
PoolObjectSQL::ZONE | - AclRule::INDIVIDUAL_ID | - zone_id, - - AuthRequest::USE, - - AclRule::ALL_ID, - - error_msg); - if (rc != 0) { return -1; From 0a71139b34096c3f06ecfc1cfcf3d4b9b0c453e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Wed, 19 Feb 2014 16:49:26 +0100 Subject: [PATCH 36/80] Bug #2724: Add resource provider ALL to users group on bootstrap --- include/RequestManagerGroup.h | 9 --- src/acl/AclManager.cc | 14 +--- src/group/Group.cc | 102 +++++++++++++++++++++++++++++ src/group/GroupPool.cc | 6 ++ src/nebula/Nebula.cc | 105 +++++++++++++++--------------- src/rm/RequestManagerGroup.cc | 118 ---------------------------------- 6 files changed, 163 insertions(+), 191 deletions(-) diff --git a/include/RequestManagerGroup.h b/include/RequestManagerGroup.h index f4e8eb752e..814595d358 100644 --- a/include/RequestManagerGroup.h +++ b/include/RequestManagerGroup.h @@ -104,9 +104,6 @@ protected: virtual int edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg) = 0; - - virtual int edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg) = 0; }; /* ------------------------------------------------------------------------- */ @@ -125,9 +122,6 @@ public: int edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg); - - int edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg); }; /* ------------------------------------------------------------------------- */ @@ -146,9 +140,6 @@ public: int edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg); - - int edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg); }; /* -------------------------------------------------------------------------- */ diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index fd408d29d8..fa525a922f 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -96,17 +96,7 @@ AclManager::AclManager( zone_id, error_str); - // Users in USERS can deploy VMs in any HOST - // @1 HOST/* MANAGE # - add_rule(AclRule::GROUP_ID | - 1, - AclRule::ALL_ID | - PoolObjectSQL::HOST, - AuthRequest::MANAGE, - AclRule::INDIVIDUAL_ID | - zone_id, - error_str); - + // * DOCUMENT/* CREATE # add_rule(AclRule::ALL_ID, AclRule::ALL_ID | PoolObjectSQL::DOCUMENT, @@ -115,7 +105,7 @@ AclManager::AclManager( zone_id, error_str); - // @ ZONE/# USE * + // * ZONE/* USE * add_rule(AclRule::ALL_ID, AclRule::ALL_ID | PoolObjectSQL::ZONE, diff --git a/src/group/Group.cc b/src/group/Group.cc index 3f0c31e1f3..7af368375e 100644 --- a/src/group/Group.cc +++ b/src/group/Group.cc @@ -317,6 +317,11 @@ int Group::from_xml(const string& xml) int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) { + AclManager* aclm = Nebula::instance().get_aclm(); + + int rc = 0; + long long mask_prefix; + pair >::iterator,bool> ret; ret = providers.insert(pair(zone_id, cluster_id)); @@ -327,6 +332,51 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) return -1; } + if (cluster_id == ClusterPool::ALL_RESOURCES) + { + mask_prefix = AclRule::ALL_ID; + } + else + { + mask_prefix = AclRule::CLUSTER_ID | cluster_id; + } + + // @ HOST/% MANAGE # + rc += aclm->add_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::HOST, + + AuthRequest::MANAGE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + // @ DATASTORE+NET/% USE # + rc += aclm->add_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + 
PoolObjectSQL::DATASTORE | + PoolObjectSQL::NET, + + AuthRequest::USE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + if (rc != 0) + { + return -1; + } + return 0; } @@ -335,11 +385,63 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) int Group::del_resource_provider(int zone_id, int cluster_id, string& error_msg) { + AclManager* aclm = Nebula::instance().get_aclm(); + + int rc = 0; + + long long mask_prefix; + if( providers.erase(pair(zone_id, cluster_id)) != 1 ) { error_msg = "Resource provider is not assigned to this group"; return -1; } + if (cluster_id == ClusterPool::ALL_RESOURCES) + { + mask_prefix = AclRule::ALL_ID; + } + else + { + mask_prefix = AclRule::CLUSTER_ID | cluster_id; + } + + // @ HOST/% MANAGE # + rc += aclm->del_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::HOST, + + AuthRequest::MANAGE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + // @ DATASTORE+NET/% USE # + rc += aclm->del_rule( + AclRule::GROUP_ID | + oid, + + mask_prefix | + PoolObjectSQL::DATASTORE | + PoolObjectSQL::NET, + + AuthRequest::USE, + + AclRule::INDIVIDUAL_ID | + zone_id, + + error_msg); + + if (rc != 0) + { + return -1; + } + return 0; } + diff --git a/src/group/GroupPool.cc b/src/group/GroupPool.cc index 2340fe0433..829d7c8d60 100644 --- a/src/group/GroupPool.cc +++ b/src/group/GroupPool.cc @@ -77,6 +77,12 @@ GroupPool::GroupPool(SqlDB * db, goto error_groups; } + group = get(rc, true); + + group->add_resource_provider(Nebula::instance().get_zone_id(), ClusterPool::ALL_RESOURCES, error_str); + + group->unlock(); + set_update_lastOID(99); } diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index da1eb823d2..203970504c 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -377,6 +377,59 @@ void Nebula::start(bool bootstrap_only) return; } + // ----------------------------------------------------------- + // Close stds, we no longer need them + // ----------------------------------------------------------- + + fd = open("/dev/null", O_RDWR); + + dup2(fd,0); + dup2(fd,1); + dup2(fd,2); + + close(fd); + + fcntl(0,F_SETFD,0); // Keep them open across exec funcs + fcntl(1,F_SETFD,0); + fcntl(2,F_SETFD,0); + + // ----------------------------------------------------------- + // Block all signals before creating any Nebula thread + // ----------------------------------------------------------- + + sigfillset(&mask); + + pthread_sigmask(SIG_BLOCK, &mask, NULL); + + // ----------------------------------------------------------- + //Managers + // ----------------------------------------------------------- + + MadManager::mad_manager_system_init(); + + time_t timer_period; + time_t monitor_period; + + nebula_configuration->get("MANAGER_TIMER", timer_period); + nebula_configuration->get("MONITORING_INTERVAL", monitor_period); + + // ---- ACL Manager ---- + try + { + aclm = new AclManager(db, zone_id, is_federation_slave(), timer_period); + } + catch (bad_alloc&) + { + throw; + } + + rc = aclm->start(); + + if ( rc != 0 ) + { + throw runtime_error("Could not start the ACL Manager"); + } + // ----------------------------------------------------------- // Pools // ----------------------------------------------------------- @@ -487,41 +540,6 @@ void Nebula::start(bool bootstrap_only) throw; } - // ----------------------------------------------------------- - // Close stds, we no longer need them - // ----------------------------------------------------------- - - fd = open("/dev/null", O_RDWR); - - dup2(fd,0); - 
dup2(fd,1); - dup2(fd,2); - - close(fd); - - fcntl(0,F_SETFD,0); // Keep them open across exec funcs - fcntl(1,F_SETFD,0); - fcntl(2,F_SETFD,0); - - // ----------------------------------------------------------- - // Block all signals before creating any Nebula thread - // ----------------------------------------------------------- - - sigfillset(&mask); - - pthread_sigmask(SIG_BLOCK, &mask, NULL); - - // ----------------------------------------------------------- - //Managers - // ----------------------------------------------------------- - - MadManager::mad_manager_system_init(); - - time_t timer_period; - time_t monitor_period; - - nebula_configuration->get("MANAGER_TIMER", timer_period); - nebula_configuration->get("MONITORING_INTERVAL", monitor_period); // ---- Virtual Machine Manager ---- try @@ -698,23 +716,6 @@ void Nebula::start(bool bootstrap_only) } } - // ---- ACL Manager ---- - try - { - aclm = new AclManager(db, zone_id, is_federation_slave(), timer_period); - } - catch (bad_alloc&) - { - throw; - } - - rc = aclm->start(); - - if ( rc != 0 ) - { - throw runtime_error("Could not start the ACL Manager"); - } - // ---- Image Manager ---- try { diff --git a/src/rm/RequestManagerGroup.cc b/src/rm/RequestManagerGroup.cc index 0dc58dea44..985caf6382 100644 --- a/src/rm/RequestManagerGroup.cc +++ b/src/rm/RequestManagerGroup.cc @@ -197,8 +197,6 @@ void GroupEditProvider::request_execute( return; } - edit_acl_rules(group_id, zone_id, cluster_id, error_str); - success_response(cluster_id, att); } @@ -214,124 +212,8 @@ int GroupAddProvider::edit_resource_provider( /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int GroupAddProvider::edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg) -{ - int rc = 0; - - long long mask_prefix; - - if (cluster_id == ClusterPool::ALL_RESOURCES) - { - mask_prefix = AclRule::ALL_ID; - } - else - { - mask_prefix = AclRule::CLUSTER_ID | cluster_id; - } - - // @ HOST/% MANAGE # - rc += aclm->add_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::HOST, - - AuthRequest::MANAGE, - - AclRule::INDIVIDUAL_ID | - zone_id, - - error_msg); - - // @ DATASTORE+NET/% USE # - rc += aclm->add_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::DATASTORE | - PoolObjectSQL::NET, - - AuthRequest::USE, - - AclRule::INDIVIDUAL_ID | - zone_id, - - error_msg); - - if (rc != 0) - { - return -1; - } - - return 0; -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - int GroupDelProvider::edit_resource_provider( Group* group, int zone_id, int cluster_id, string& error_msg) { return group->del_resource_provider(zone_id, cluster_id, error_msg); } - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -int GroupDelProvider::edit_acl_rules( - int group_id, int zone_id, int cluster_id, string& error_msg) -{ - int rc = 0; - - long long mask_prefix; - - if (cluster_id == ClusterPool::ALL_RESOURCES) - { - mask_prefix = AclRule::ALL_ID; - } - else - { - mask_prefix = AclRule::CLUSTER_ID | cluster_id; - } - - // @ HOST/% MANAGE # - rc += aclm->del_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::HOST, - - AuthRequest::MANAGE, - - AclRule::INDIVIDUAL_ID | 
- zone_id, - - error_msg); - - // @ DATASTORE+NET/% USE # - rc += aclm->del_rule( - AclRule::GROUP_ID | - group_id, - - mask_prefix | - PoolObjectSQL::DATASTORE | - PoolObjectSQL::NET, - - AuthRequest::USE, - - AclRule::INDIVIDUAL_ID | - zone_id, - - error_msg); - - if (rc != 0) - { - return -1; - } - - return 0; -} From 73bdfbe632574f99f28f0b37c2ef6da2d67ec052 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Wed, 19 Feb 2014 18:26:34 +0100 Subject: [PATCH 37/80] Bug #2724 #2736: DS 1,2 are not created with permission OTHER USE Since permissions are managed with resource providers, it is no longer needed --- src/datastore/DatastorePool.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/datastore/DatastorePool.cc b/src/datastore/DatastorePool.cc index 8cee553741..da7de7c8c7 100644 --- a/src/datastore/DatastorePool.cc +++ b/src/datastore/DatastorePool.cc @@ -103,7 +103,7 @@ DatastorePool::DatastorePool(SqlDB * db): GroupPool::ONEADMIN_ID, UserPool::oneadmin_name, GroupPool::ONEADMIN_NAME, - 0133, + 0137, ds_tmpl, &rc, ClusterPool::NONE_CLUSTER_ID, @@ -137,7 +137,7 @@ DatastorePool::DatastorePool(SqlDB * db): GroupPool::ONEADMIN_ID, UserPool::oneadmin_name, GroupPool::ONEADMIN_NAME, - 0133, + 0137, ds_tmpl, &rc, ClusterPool::NONE_CLUSTER_ID, From 7e824a39a24b3533ad2a846397f4f234b5a9d92b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Wed, 19 Feb 2014 18:42:28 +0100 Subject: [PATCH 38/80] Feature #2736: Default group ACL allows to create DOCUMENTs This way we can remove the default acl that allowed everybody to create them, and let the admin decide when a new group is defined. --- src/acl/AclManager.cc | 12 ++---------- src/oca/ruby/opennebula/group.rb | 2 +- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index fa525a922f..96f8ede4d7 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -83,22 +83,14 @@ AclManager::AclManager( string error_str; // Users in group USERS can create standard resources - // @1 VM+NET+IMAGE+TEMPLATE/* CREATE # + // @1 VM+NET+IMAGE+TEMPLATE+DOCUMENT/* CREATE # add_rule(AclRule::GROUP_ID | 1, AclRule::ALL_ID | PoolObjectSQL::VM | PoolObjectSQL::NET | PoolObjectSQL::IMAGE | - PoolObjectSQL::TEMPLATE, - AuthRequest::CREATE, - AclRule::INDIVIDUAL_ID | - zone_id, - error_str); - - // * DOCUMENT/* CREATE # - add_rule(AclRule::ALL_ID, - AclRule::ALL_ID | + PoolObjectSQL::TEMPLATE | PoolObjectSQL::DOCUMENT, AuthRequest::CREATE, AclRule::INDIVIDUAL_ID | diff --git a/src/oca/ruby/opennebula/group.rb b/src/oca/ruby/opennebula/group.rb index cebbaa9980..84caa911ac 100644 --- a/src/oca/ruby/opennebula/group.rb +++ b/src/oca/ruby/opennebula/group.rb @@ -36,7 +36,7 @@ module OpenNebula SELF = -1 # Default resource ACL's for group users (create) - GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE" + GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE+DOCUMENT" ALL_CLUSTERS_IN_ZONE = 10 # Creates a Group description with just its identifier From d1a2b207f250dd7555650a4b965f4cabd63beb80 Mon Sep 17 00:00:00 2001 From: "Ruben S. 
Montero" Date: Thu, 20 Feb 2014 12:36:06 +0100 Subject: [PATCH 39/80] feature #2587: Added a DEFAULT_DEV_PREFIX for CD-ROM devices --- include/ImagePool.h | 1 + share/etc/oned.conf | 4 ++++ src/image/ImagePool.cc | 3 ++- src/nebula/Nebula.cc | 5 ++++- src/nebula/NebulaTemplate.cc | 5 +++++ 5 files changed, 16 insertions(+), 2 deletions(-) diff --git a/include/ImagePool.h b/include/ImagePool.h index 234db68bd7..bc51791fb2 100644 --- a/include/ImagePool.h +++ b/include/ImagePool.h @@ -43,6 +43,7 @@ public: SqlDB * db, const string& __default_type, const string& __default_dev_prefix, + const string& __default_cdrom_dev_prefix, vector& restricted_attrs, vector hook_mads, const string& remotes_location, diff --git a/share/etc/oned.conf b/share/etc/oned.conf index f04b5ee725..4b9152c3b9 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -176,11 +176,14 @@ MAC_PREFIX = "02:00" # CDROM Image file holding a CDROM # DATABLOCK Image file holding a datablock, # always created as an empty block +# # DEFAULT_DEVICE_PREFIX: This can be set to # hd IDE prefix # sd SCSI # xvd XEN Virtual Disk # vd KVM virtual disk +# +# DEFAULT_CDROM_DEVICE_PREFIX: Same as above but for CDROM devices. #******************************************************************************* #DATASTORE_LOCATION = /var/lib/one/datastores @@ -192,6 +195,7 @@ DATASTORE_CAPACITY_CHECK = "yes" DEFAULT_IMAGE_TYPE = "OS" DEFAULT_DEVICE_PREFIX = "hd" +DEFAULT_CDROM_DEVICE_PREFIX = "hd" #******************************************************************************* # Information Driver Configuration diff --git a/src/image/ImagePool.cc b/src/image/ImagePool.cc index 6974929157..4d326fafed 100644 --- a/src/image/ImagePool.cc +++ b/src/image/ImagePool.cc @@ -36,6 +36,7 @@ ImagePool::ImagePool( SqlDB * db, const string& __default_type, const string& __default_dev_prefix, + const string& __default_cdrom_dev_prefix, vector& restricted_attrs, vector hook_mads, const string& remotes_location, @@ -47,7 +48,7 @@ ImagePool::ImagePool( _default_type = __default_type; _default_dev_prefix = __default_dev_prefix; - _default_cdrom_dev_prefix = "hd"; + _default_cdrom_dev_prefix = __default_cdrom_dev_prefix; // Init inherit attributes vector::const_iterator it; diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index 203970504c..111e146c4d 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -440,6 +440,7 @@ void Nebula::start(bool bootstrap_only) string mac_prefix; string default_image_type; string default_device_prefix; + string default_cdrom_device_prefix; time_t expiration_time; time_t vm_expiration; @@ -518,10 +519,12 @@ void Nebula::start(bool bootstrap_only) nebula_configuration->get("DEFAULT_IMAGE_TYPE", default_image_type); nebula_configuration->get("DEFAULT_DEVICE_PREFIX", default_device_prefix); - + nebula_configuration->get("DEFAULT_CDROM_DEVICE_PREFIX", + default_cdrom_device_prefix); ipool = new ImagePool(db, default_image_type, default_device_prefix, + default_cdrom_device_prefix, img_restricted_attrs, image_hooks, remotes_location, diff --git a/src/nebula/NebulaTemplate.cc b/src/nebula/NebulaTemplate.cc index 591eadd958..5f52db8fae 100644 --- a/src/nebula/NebulaTemplate.cc +++ b/src/nebula/NebulaTemplate.cc @@ -282,6 +282,7 @@ void OpenNebulaTemplate::set_conf_default() # DATASTORE_CAPACITY_CHECK # DEFAULT_IMAGE_TYPE # DEFAULT_DEVICE_PREFIX +# DEFAULT_CDROM_DEVICE_PREFIX #******************************************************************************* */ //DATASTORE_LOCATION @@ -311,6 +312,10 @@ void 
OpenNebulaTemplate::set_conf_default() attribute = new SingleAttribute("DEFAULT_DEVICE_PREFIX",value); conf_default.insert(make_pair(attribute->name(),attribute)); + + //DEFAULT_CDROM_DEVICE_PREFIX + attribute = new SingleAttribute("DEFAULT_CDROM_DEVICE_PREFIX",value); + conf_default.insert(make_pair(attribute->name(),attribute)); /* #******************************************************************************* # Auth Manager Configuration From a52a66445e035368079cf759f7be30047f310608 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 20 Feb 2014 12:57:48 +0100 Subject: [PATCH 40/80] Fix onehost sync percentage bar bug --- src/cli/one_helper/onehost_helper.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/one_helper/onehost_helper.rb b/src/cli/one_helper/onehost_helper.rb index d80a3b3c4b..50f149dee5 100644 --- a/src/cli/one_helper/onehost_helper.rb +++ b/src/cli/one_helper/onehost_helper.rb @@ -307,7 +307,7 @@ class OneHostHelper < OpenNebulaHelper::OneHelper str="#{bar} #{info} " name=host[0..(79-str.length)] str=str+name - str=str+" "*(79-str.length) + str=str+" "*(80-str.length) print "#{str}\r" STDOUT.flush From 12a9b8db46b50f76c2be37dbcf07f520c9deb487 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Thu, 20 Feb 2014 15:48:13 +0100 Subject: [PATCH 41/80] feature #2731: Datastores with ID < 100 can now be removed or added to a cluster --- src/cluster/Cluster.cc | 13 +------------ src/datastore/DatastorePool.cc | 8 -------- 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/src/cluster/Cluster.cc b/src/cluster/Cluster.cc index fbdeee4311..451204766e 100644 --- a/src/cluster/Cluster.cc +++ b/src/cluster/Cluster.cc @@ -118,18 +118,7 @@ string& Cluster::get_ds_location(string &ds_location) int Cluster::add_datastore(int id, Datastore::DatastoreType ds_type, string& error_msg) { - if ( id == DatastorePool::SYSTEM_DS_ID ) - { - ostringstream oss; - oss << "Datastore "<< DatastorePool::SYSTEM_DS_ID - << " cannot be added to any cluster."; - - error_msg = oss.str(); - - return -1; - } - - int rc = datastores.add_collection_id(id); + int rc = datastores.add_collection_id(id); if ( rc < 0 ) { diff --git a/src/datastore/DatastorePool.cc b/src/datastore/DatastorePool.cc index da7de7c8c7..27d81c2485 100644 --- a/src/datastore/DatastorePool.cc +++ b/src/datastore/DatastorePool.cc @@ -241,14 +241,6 @@ int DatastorePool::drop(PoolObjectSQL * objsql, string& error_msg) int rc; - // Return error if the datastore is a default one. - if( datastore->get_oid() < 100 ) - { - error_msg = "System Datastores (ID < 100) cannot be deleted."; - NebulaLog::log("DATASTORE", Log::ERROR, error_msg); - return -2; - } - if( datastore->get_collection_size() > 0 ) { ostringstream oss; From 96c823c3af8d67ab82f12f5321f5e8e37338bf72 Mon Sep 17 00:00:00 2001 From: "Ruben S. 
Montero" Date: Thu, 20 Feb 2014 15:54:43 +0100 Subject: [PATCH 42/80] feature #2731: System DS can be added to a cluster through Sunstone --- src/sunstone/public/js/plugins/clusters-tab.js | 1 - 1 file changed, 1 deletion(-) diff --git a/src/sunstone/public/js/plugins/clusters-tab.js b/src/sunstone/public/js/plugins/clusters-tab.js index e06d3ea9a5..ea52229c53 100644 --- a/src/sunstone/public/js/plugins/clusters-tab.js +++ b/src/sunstone/public/js/plugins/clusters-tab.js @@ -848,7 +848,6 @@ function updateClusterDatastoresView(request, list){ var list_array = []; $.each(list,function(){ - if(this.DATASTORE.ID!=0) list_array.push( datastoreElementArray(this)); }); From 5ad7d82109d3e96db43a98317a8883f3d01c9623 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 20 Feb 2014 16:24:08 +0100 Subject: [PATCH 43/80] bug #2723: fix use of --user with --cluster in the cli --cluster param needs to create a client to search for the cluster. This happens while the parameters are being parsed so it fails if there is no ONE_AUTH. To make it work the parameters user, password and endpoint are saved as OneHelper class variables as soon as they are parsed. This way they can be used to create the XMLRPC client before all parameters are parsed. --- src/cli/one_helper.rb | 61 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 11 deletions(-) diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb index 43fbc34a9a..34440291f7 100644 --- a/src/cli/one_helper.rb +++ b/src/cli/one_helper.rb @@ -100,19 +100,31 @@ EOT :name => 'user', :large => '--user name', :description => 'User name used to connect to OpenNebula', - :format => String + :format => String, + :proc => lambda do |o, options| + OneHelper.set_user(o) + [0, o] + end }, { :name => 'password', :large => '--password password', :description => 'Password to authenticate with OpenNebula', - :format => String + :format => String, + :proc => lambda do |o, options| + OneHelper.set_password(o) + [0, o] + end }, { :name => 'endpoint', :large => '--endpoint endpoint', :description => 'URL of OpenNebula xmlrpc frontend', - :format => String + :format => String, + :proc => lambda do |o, options| + OneHelper.set_endpoint(o) + [0, o] + end } ] @@ -340,18 +352,31 @@ EOT class OneHelper attr_accessor :client - def self.get_client(options) - if defined?(@@client) + def self.get_client(options={}, force=false) + if !force && defined?(@@client) @@client else + secret=nil - user=options[:user] + password=nil + + if defined?(@@user) + user=@@user + password=@@password if defined?(@@password) + else + user=options[:user] + end + if user - password=options[:password]||self.get_password + password=password||options[:password]||self.get_password secret="#{user}:#{password}" end - endpoint=options[:endpoint] + if defined?(@@endpoint) + endpoint=@@endpoint + else + endpoint=options[:endpoint] + end @@client=OpenNebula::Client.new(secret, endpoint) end @@ -361,10 +386,22 @@ EOT if defined?(@@client) @@client else - self.get_client({}) + self.get_client end end + def self.set_user(user) + @@user=user + end + + def self.set_password(password) + @@password=password + end + + def self.set_endpoint(endpoint) + @@endpoint=endpoint + end + if RUBY_VERSION>="1.9.3" require 'io/console' def self.get_password @@ -374,6 +411,7 @@ EOT puts pass.chop! 
if pass + @@password=pass pass end else @@ -381,8 +419,9 @@ EOT def self.get_password print "Password: " system("stty", "-echo") + @@password=gets.chop begin - return gets.chop + return @@password ensure system("stty", "echo") print "\n" @@ -397,7 +436,7 @@ EOT end def set_client(options) - @client=OpenNebulaHelper::OneHelper.get_client(options) + @client=OpenNebulaHelper::OneHelper.get_client(options, true) end def create_resource(options, &block) From 4ac9c8126e45e4e92b2492463cd8487aa4b9e615 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Thu, 20 Feb 2014 15:25:24 +0100 Subject: [PATCH 44/80] Fix bug in 4.5.80 migrator --- src/onedb/4.4.1_to_4.5.80.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/onedb/4.4.1_to_4.5.80.rb b/src/onedb/4.4.1_to_4.5.80.rb index 9b27ef70ae..6baa114b75 100644 --- a/src/onedb/4.4.1_to_4.5.80.rb +++ b/src/onedb/4.4.1_to_4.5.80.rb @@ -117,9 +117,9 @@ module Migrator # Default ZONE @db.run "CREATE TABLE zone_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.run "INSERT INTO \"zone_pool\" VALUES(0,'OpenNebula','0OpenNebula',0,0,1,0,0);" + @db.run "INSERT INTO zone_pool VALUES(0,'OpenNebula','0OpenNebula',0,0,1,0,0);" - @db.run "INSERT INTO \"pool_control\" VALUES('zone_pool',99);" + @db.run "INSERT INTO pool_control VALUES('zone_pool',99);" return true end From 1e096aa69cbb56b29708e32988f63dff07a06213 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Thu, 20 Feb 2014 16:35:02 +0100 Subject: [PATCH 45/80] Update onedb fsck --- src/onedb/fsck.rb | 102 +++++++++++++++++++++++++++------------------- 1 file changed, 61 insertions(+), 41 deletions(-) diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index 5b9e0a221d..ae6f6a9f0f 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -116,7 +116,7 @@ module OneDBFsck tables = ["group_pool", "user_pool", "acl", "image_pool", "host_pool", "network_pool", "template_pool", "vm_pool", "cluster_pool", - "datastore_pool", "document_pool"] + "datastore_pool", "document_pool", "zone_pool"] tables.each do |table| max_oid = -1 @@ -1260,33 +1260,43 @@ module OneDBFsck # USER QUOTAS ######################################################################## - @db.run "ALTER TABLE user_pool RENAME TO old_user_pool;" - @db.run "CREATE TABLE user_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.transaction do - # oneadmin does not have quotas - @db.fetch("SELECT * FROM old_user_pool WHERE oid=0") do |row| - @db[:user_pool].insert(row) - end + @db.fetch("SELECT oid FROM user_pool") do |row| + found = false - @db.fetch("SELECT * FROM old_user_pool WHERE oid>0") do |row| - doc = Nokogiri::XML(row[:body]) + @db.fetch("SELECT user_oid FROM user_quotas WHERE user_oid=#{row[:oid]}") do |q_row| + found = true + end - calculate_quotas(doc, "uid=#{row[:oid]}", "User") + if !found + log_error("User #{row[:oid]} does not have a quotas entry") - @db[:user_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db.run "INSERT INTO user_quotas VALUES(#{row[:oid]},'#{row[:oid]}');" + end end end - @db.run "DROP TABLE old_user_pool;" + @db.run "ALTER TABLE user_quotas RENAME TO old_user_quotas;" + 
@db.run "CREATE TABLE user_quotas (user_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_user_quotas WHERE user_oid=0") do |row| + @db[:user_quotas].insert(row) + end + + @db.fetch("SELECT * FROM old_user_quotas WHERE user_oid>0") do |row| + doc = Nokogiri::XML(row[:body]) + + calculate_quotas(doc, "uid=#{row[:user_oid]}", "User") + + @db[:user_quotas].insert( + :user_oid => row[:user_oid], + :body => doc.root.to_s) + end + end + + @db.run "DROP TABLE old_user_quotas;" log_time() @@ -1296,33 +1306,43 @@ module OneDBFsck # GROUP QUOTAS ######################################################################## - @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" - @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" - @db.transaction do - # oneadmin group does not have quotas - @db.fetch("SELECT * FROM old_group_pool WHERE oid=0") do |row| - @db[:group_pool].insert(row) - end + @db.fetch("SELECT oid FROM group_pool") do |row| + found = false - @db.fetch("SELECT * FROM old_group_pool WHERE oid>0") do |row| - doc = Nokogiri::XML(row[:body]) + @db.fetch("SELECT group_oid FROM group_quotas WHERE group_oid=#{row[:oid]}") do |q_row| + found = true + end - calculate_quotas(doc, "gid=#{row[:oid]}", "Group") + if !found + log_error("Group #{row[:oid]} does not have a quotas entry") - @db[:group_pool].insert( - :oid => row[:oid], - :name => row[:name], - :body => doc.root.to_s, - :uid => row[:oid], - :gid => row[:gid], - :owner_u => row[:owner_u], - :group_u => row[:group_u], - :other_u => row[:other_u]) + @db.run "INSERT INTO group_quotas VALUES(#{row[:oid]},'#{row[:oid]}');" + end end end - @db.run "DROP TABLE old_group_pool;" + @db.run "ALTER TABLE group_quotas RENAME TO old_group_quotas;" + @db.run "CREATE TABLE group_quotas (group_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" + + @db.transaction do + # oneadmin does not have quotas + @db.fetch("SELECT * FROM old_group_quotas WHERE group_oid=0") do |row| + @db[:group_quotas].insert(row) + end + + @db.fetch("SELECT * FROM old_group_quotas WHERE group_oid>0") do |row| + doc = Nokogiri::XML(row[:body]) + + calculate_quotas(doc, "gid=#{row[:group_oid]}", "Group") + + @db[:group_quotas].insert( + :group_oid => row[:group_oid], + :body => doc.root.to_s) + end + end + + @db.run "DROP TABLE old_group_quotas;" log_time() From 3a0fefc1c1f9d8e5bd42d26fb6d95edd4ce62453 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Thu, 20 Feb 2014 16:36:06 +0100 Subject: [PATCH 46/80] Bug #2741: Do not use floats to calculate cpu quotas in fsck --- src/onedb/fsck.rb | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index ae6f6a9f0f..00a8ee8ede 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -22,7 +22,7 @@ require 'set' require 'nokogiri' module OneDBFsck - VERSION = "4.5.0" + VERSION = "4.5.80" def db_version VERSION @@ -1368,7 +1368,7 @@ module OneDBFsck oid = doc.root.at_xpath("ID").text.to_i # VM quotas - cpu_used = 0.0 + cpu_used = 0 mem_used = 0 vms_used = 0 vol_used = 0 @@ -1385,9 +1385,8 @@ module OneDBFsck # VM quotas vmdoc.root.xpath("TEMPLATE/CPU").each { |e| # truncate to 2 decimals - cpu = (e.text.to_f * 100).to_i / 100.0 + cpu = (e.text.to_f * 100).to_i cpu_used += cpu - cpu_used = (cpu_used * 100).to_i / 100.0 } 
vmdoc.root.xpath("TEMPLATE/MEMORY").each { |e| @@ -1457,6 +1456,8 @@ module OneDBFsck # Check if the float value or the string representation mismatch, # but ignoring the precision + cpu_used = (cpu_used / 100.0) + different = ( e.text.to_f != cpu_used || ![sprintf('%.2f', cpu_used), sprintf('%.1f', cpu_used), sprintf('%.0f', cpu_used)].include?(e.text) ) From 7744150944223685b4ef7a7dc4750a31e98772fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Fri, 21 Feb 2014 15:23:24 +0100 Subject: [PATCH 47/80] Feature #2727: Delete acl rules that apply to a zone when that zone is deleted --- include/AclManager.h | 14 ++++++++++++ include/RequestManagerDelete.h | 2 ++ src/acl/AclManager.cc | 41 ++++++++++++++++++++++++++++++++++ src/rm/RequestManagerDelete.cc | 15 +++++++++++++ 4 files changed, 72 insertions(+) diff --git a/include/AclManager.h b/include/AclManager.h index 32eb30e5d1..c870e6a7fb 100644 --- a/include/AclManager.h +++ b/include/AclManager.h @@ -142,6 +142,13 @@ public: */ void del_cid_rules(int cid); + /** + * Deletes rules that apply to this cluster id + * + * @param zid The zone id + */ + void del_zid_rules(int zid); + /** * Deletes all rules that apply to this resource * @@ -308,6 +315,13 @@ private: long long resource_req, long long resource_mask); + /** + * Deletes all rules that match the zone mask + * + * @param zone_req Mask to match + */ + void del_zone_matching_rules(long long zone_req); + // ---------------------------------------- // Local zone // ---------------------------------------- diff --git a/include/RequestManagerDelete.h b/include/RequestManagerDelete.h index 2a387708a4..f5db3ee36c 100644 --- a/include/RequestManagerDelete.h +++ b/include/RequestManagerDelete.h @@ -310,6 +310,8 @@ public: }; ~ZoneDelete(){}; + + int drop(int oid, PoolObjectSQL * object, string& error_msg); }; /* -------------------------------------------------------------------------- */ diff --git a/src/acl/AclManager.cc b/src/acl/AclManager.cc index 96f8ede4d7..6d0c07659d 100644 --- a/src/acl/AclManager.cc +++ b/src/acl/AclManager.cc @@ -785,6 +785,18 @@ void AclManager::del_cid_rules(int cid) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +void AclManager::del_zid_rules(int zid) +{ + long long request = AclRule::INDIVIDUAL_ID | zid; + + // Delete rules that match + // __ __/__ __ #zid + del_zone_matching_rules(request); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + void AclManager::del_resource_rules(int oid, PoolObjectSQL::ObjectType obj_type) { long long request = obj_type | @@ -862,6 +874,35 @@ void AclManager::del_resource_matching_rules(long long resource_req, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +void AclManager::del_zone_matching_rules(long long zone_req) +{ + multimap::iterator it; + + vector oids; + vector::iterator oid_it; + string error_str; + + lock(); + + for ( it = acl_rules.begin(); it != acl_rules.end(); it++ ) + { + if ( it->second->zone == zone_req ) + { + oids.push_back(it->second->oid); + } + } + + unlock(); + + for ( oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ ) + { + del_rule(*oid_it, error_str); + } +} + +/* 
-------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + void AclManager::reverse_search(int uid, const set& user_groups, PoolObjectSQL::ObjectType obj_type, diff --git a/src/rm/RequestManagerDelete.cc b/src/rm/RequestManagerDelete.cc index 57ec0211a9..fd1afa404d 100644 --- a/src/rm/RequestManagerDelete.cc +++ b/src/rm/RequestManagerDelete.cc @@ -302,3 +302,18 @@ int UserDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) return rc; } + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +int ZoneDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) +{ + int rc = RequestManagerDelete::drop(oid, object, error_msg); + + if ( rc == 0 ) + { + aclm->del_zid_rules(oid); + } + + return rc; +} From 46e668870694a0690052bb749a2704542d913d4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Fri, 21 Feb 2014 18:08:41 +0100 Subject: [PATCH 48/80] Feature #2743: onedb keeps 3 different versions 1 the code version 2 the replicated (master) DB tables version 3 the slave (local) DB tables version --- include/Nebula.h | 22 +++++++ include/SystemDB.h | 12 +++- src/nebula/Nebula.cc | 8 ++- src/nebula/SystemDB.cc | 112 +++++++++++++++------------------ src/rm/RequestManagerSystem.cc | 2 +- 5 files changed, 89 insertions(+), 67 deletions(-) diff --git a/include/Nebula.h b/include/Nebula.h index abe37673a9..4aa40c90cc 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -366,11 +366,33 @@ public: return "OpenNebula 4.5.0"; }; + /** + * Returns the version of oned + * @return + */ + static string code_version() + { + return "4.5.0"; + } + + /** + * Version needed for the DB, replicated tables + * @return + */ static string db_version() { return "4.5.0"; } + /** + * Version needed for the DB, local tables + * @return + */ + static string slave_db_version() + { + return "4.5.0"; + } + /** * Starts all the modules and services for OpenNebula */ diff --git a/include/SystemDB.h b/include/SystemDB.h index 979bb19d16..3524a462f4 100644 --- a/include/SystemDB.h +++ b/include/SystemDB.h @@ -119,14 +119,22 @@ private: * * @return 0 on success */ - int bootstrap(); + int bootstrap() + { + return bootstrap(true); + } /** * Bootstraps the database control tables for a slave DB * * @return 0 on success */ - int slave_bootstrap(); + int slave_bootstrap() + { + return bootstrap(false); + } + + int bootstrap(bool do_master); /** * Callback function for the check_db_version method. 
Stores the read diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index 111e146c4d..bbf93f9905 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -313,10 +313,14 @@ void Nebula::start(bool bootstrap_only) if( is_federation_slave() && rc == -2 ) { - throw runtime_error( + string error_str = "Either the database was not bootstrapped by the " "federation master, or the replication was " - "not configured."); + "not configured."; + + NebulaLog::log("ONE",Log::ERROR,error_str); + + throw runtime_error(error_str); } if( rc == -2 || rc == -3 ) diff --git a/src/nebula/SystemDB.cc b/src/nebula/SystemDB.cc index 53902fb48e..545ac6558a 100644 --- a/src/nebula/SystemDB.cc +++ b/src/nebula/SystemDB.cc @@ -40,11 +40,11 @@ const char * SystemDB::ver_bootstrap = "CREATE TABLE db_versioning " // DB slave versioning table const char * SystemDB::slave_ver_table = "slave_db_versioning"; -const char * SystemDB::slave_ver_names = "oid, version, timestamp, comment"; +const char * SystemDB::slave_ver_names = "oid, version, timestamp, comment, is_slave"; const char * SystemDB::slave_ver_bootstrap = "CREATE TABLE slave_db_versioning " "(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, " - "comment VARCHAR(256))"; + "comment VARCHAR(256), is_slave BOOLEAN)"; // System attributes table const char * SystemDB::sys_table = "system_attributes"; @@ -56,7 +56,7 @@ const char * SystemDB::sys_bootstrap = "CREATE TABLE IF NOT EXISTS" /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int SystemDB::bootstrap() +int SystemDB::bootstrap(bool do_master) { int rc; ostringstream oss; @@ -67,52 +67,34 @@ int SystemDB::bootstrap() oss.str(pc_bootstrap); rc = db->exec(oss); - // ------------------------------------------------------------------------ - // db versioning, version of OpenNebula. - // ------------------------------------------------------------------------ - oss.str(ver_bootstrap); - rc += db->exec(oss); + if (do_master) + { + // --------------------------------------------------------------------- + // db versioning, version of OpenNebula. 
+ // --------------------------------------------------------------------- + oss.str(ver_bootstrap); + rc += db->exec(oss); - oss.str(""); - oss << "INSERT INTO " << ver_table << " (" << ver_names << ") " - << "VALUES (0, '" << Nebula::db_version() << "', " << time(0) - << ", '" << Nebula::version() << " daemon bootstrap')"; + oss.str(""); + oss << "INSERT INTO " << ver_table << " (" << ver_names << ") " + << "VALUES (0, '" << Nebula::db_version() << "', " << time(0) + << ", '" << Nebula::version() << " daemon bootstrap')"; - rc += db->exec(oss); + rc += db->exec(oss); + } // ------------------------------------------------------------------------ - // system - // ------------------------------------------------------------------------ - oss.str(sys_bootstrap); - rc += db->exec(oss); - - return rc; -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -int SystemDB::slave_bootstrap() -{ - int rc; - ostringstream oss; - - // ------------------------------------------------------------------------ - // pool control, tracks the last ID's assigned to objects - // ------------------------------------------------------------------------ - oss.str(pc_bootstrap); - rc = db->exec(oss); - - // ------------------------------------------------------------------------ - // db versioning, version of OpenNebula. + // slave db versioning, version of tables that are not replicated in a + // slave OpenNebula. // ------------------------------------------------------------------------ oss.str(slave_ver_bootstrap); rc += db->exec(oss); oss.str(""); oss << "INSERT INTO " << slave_ver_table << " (" << slave_ver_names << ") " - << "VALUES (0, '" << Nebula::db_version() << "', " << time(0) - << ", '" << Nebula::version() << " daemon bootstrap')"; + << "VALUES (0, '" << Nebula::slave_db_version() << "', " << time(0) + << ", '" << Nebula::version() << " daemon bootstrap', " + << Nebula::instance().is_federation_slave() << ")"; rc += db->exec(oss); @@ -201,39 +183,45 @@ int SystemDB::check_db_version(bool is_federation_slave) return -1; } - if (is_federation_slave) + loaded_db_version = ""; + + // Try to read latest version from the slave db version table + set_callback( static_cast(&SystemDB::select_cb), + static_cast(&loaded_db_version) ); + + oss << "SELECT version FROM " << slave_ver_table + << " WHERE oid=(SELECT MAX(oid) FROM " << slave_ver_table << ")"; + + db->exec(oss, this, true); + + oss.str(""); + unset_callback(); + + if( loaded_db_version == "" ) { - string loaded_db_version = ""; + // Database needs bootstrap only for the slave tables + return -3; + } - // Try to read latest version from the slave db version table - set_callback( static_cast(&SystemDB::select_cb), - static_cast(&loaded_db_version) ); - - oss << "SELECT version FROM " << slave_ver_table - << " WHERE oid=(SELECT MAX(oid) FROM " << slave_ver_table << ")"; - - db->exec(oss, this, true); - - oss.str(""); - unset_callback(); - - if( loaded_db_version == "" ) + if( Nebula::slave_db_version() != loaded_db_version ) + { + if (!is_federation_slave) { - return -3; + oss << "Database version mismatch. " + << "Installed " << Nebula::version() << " uses DB version '" + << Nebula::slave_db_version() << "', and existing DB version is '" + << loaded_db_version << "'."; } - - if( Nebula::db_version() != loaded_db_version ) + else { oss << "Database version mismatch. 
" << "Installed slave " << Nebula::version() << " uses DB version '" - << Nebula::db_version() << "', and existing slave DB version is '" + << Nebula::slave_db_version() << "', and existing slave DB version is '" << loaded_db_version << "'."; - - NebulaLog::log("ONE",Log::ERROR,oss); - return -1; } - return 0; + NebulaLog::log("ONE",Log::ERROR,oss); + return -1; } return 0; diff --git a/src/rm/RequestManagerSystem.cc b/src/rm/RequestManagerSystem.cc index a324569c58..a9bc1582fd 100644 --- a/src/rm/RequestManagerSystem.cc +++ b/src/rm/RequestManagerSystem.cc @@ -30,7 +30,7 @@ void SystemVersion::request_execute(xmlrpc_c::paramList const& paramList, // Should we make the version call accessible even // if no user is provided? - success_response(Nebula::instance().db_version(), att); + success_response(Nebula::instance().code_version(), att); return; } From 414fdf8fb6b2813a49213616d795147c9b56c520 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 21 Feb 2014 19:03:21 +0100 Subject: [PATCH 49/80] bug #2560: add remote to kill wild collectd processes --- install.sh | 9 ++++-- .../common.d/collectd-client-shepherd.sh | 29 +++++++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) create mode 100755 src/im_mad/remotes/common.d/collectd-client-shepherd.sh diff --git a/install.sh b/install.sh index 3f62f396a1..257e2e12d9 100755 --- a/install.sh +++ b/install.sh @@ -795,7 +795,8 @@ IM_PROBES_KVM_PROBES_FILES="src/im_mad/remotes/kvm-probes.d/kvm.rb \ src/im_mad/remotes/kvm-probes.d/poll.sh \ src/im_mad/remotes/kvm-probes.d/name.sh \ src/im_mad/remotes/common.d/monitor_ds.sh \ - src/im_mad/remotes/common.d/version.sh" + src/im_mad/remotes/common.d/version.sh \ + src/im_mad/remotes/common.d/collectd-client-shepherd.sh" IM_PROBES_XEN3_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \ src/im_mad/remotes/xen.d/collectd-client.rb" @@ -806,7 +807,8 @@ IM_PROBES_XEN3_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \ src/im_mad/remotes/xen-probes.d/poll3.sh \ src/im_mad/remotes/xen-probes.d/name.sh src/im_mad/remotes/common.d/monitor_ds.sh \ - src/im_mad/remotes/common.d/version.sh" + src/im_mad/remotes/common.d/version.sh \ + src/im_mad/remotes/common.d/collectd-client-shepherd.sh" IM_PROBES_XEN4_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \ src/im_mad/remotes/xen.d/collectd-client.rb" @@ -817,7 +819,8 @@ IM_PROBES_XEN4_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \ src/im_mad/remotes/xen-probes.d/poll4.sh \ src/im_mad/remotes/xen-probes.d/name.sh \ src/im_mad/remotes/common.d/monitor_ds.sh \ - src/im_mad/remotes/common.d/version.sh" + src/im_mad/remotes/common.d/version.sh \ + src/im_mad/remotes/common.d/collectd-client-shepherd.sh" IM_PROBES_VMWARE_FILES="src/im_mad/remotes/vmware.d/vmware.rb" diff --git a/src/im_mad/remotes/common.d/collectd-client-shepherd.sh b/src/im_mad/remotes/common.d/collectd-client-shepherd.sh new file mode 100755 index 0000000000..04058ca178 --- /dev/null +++ b/src/im_mad/remotes/common.d/collectd-client-shepherd.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +( + +running_pid=$(cat /tmp/one-collectd-client.pid) +pids=$(ps axuwww | grep /collectd-client.rb | grep -v grep | awk '{ print $2 }' | grep -v "^${running_pid}$") + +if [ -n "$pids" ]; then + kill -6 $pids +fi + +) > /dev/null + From 3ae612a3c6889796c131b321d5059c90d9fcb48c Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Sun, 23 Feb 2014 19:31:08 +0100 Subject: [PATCH 50/80] feature #2743: Split logic to deal with local and shared table DB versions. --- include/Nebula.h | 6 +- include/SystemDB.h | 46 ++++---- src/nebula/Nebula.cc | 66 ++++++------ src/nebula/SystemDB.cc | 238 +++++++++++++++++++++++------------------ 4 files changed, 194 insertions(+), 162 deletions(-) diff --git a/include/Nebula.h b/include/Nebula.h index 4aa40c90cc..eb34014bd5 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -376,10 +376,10 @@ public: } /** - * Version needed for the DB, replicated tables + * Version needed for the DB, shared tables * @return */ - static string db_version() + static string shared_db_version() { return "4.5.0"; } @@ -388,7 +388,7 @@ public: * Version needed for the DB, local tables * @return */ - static string slave_db_version() + static string local_db_version() { return "4.5.0"; } diff --git a/include/SystemDB.h b/include/SystemDB.h index 3524a462f4..06002cc71d 100644 --- a/include/SystemDB.h +++ b/include/SystemDB.h @@ -80,18 +80,18 @@ private: static const char * pc_table; // DB versioning table - static const char * ver_names; + static const char * shared_ver_names; - static const char * ver_bootstrap; + static const char * shared_ver_bootstrap; - static const char * ver_table; + static const char * shared_ver_table; // DB slave versioning table - static const char * slave_ver_names; + static const char * local_ver_names; - static const char * slave_ver_bootstrap; + static const char * local_ver_bootstrap; - static const char * slave_ver_table; + static const char * local_ver_table; // System attributes table static const char * sys_names; @@ -115,26 +115,18 @@ private: bool replace, string& error_str); /** - * Bootstraps the database control tables + * Bootstraps the database control tables for shared tables * * @return 0 on success */ - int bootstrap() - { - return bootstrap(true); - } + int shared_bootstrap(); /** - * Bootstraps the database control tables for a slave DB + * Bootstraps the database control tables for a local DB tables * * @return 0 on success */ - int slave_bootstrap() - { - return bootstrap(false); - } - - int bootstrap(bool do_master); + int local_bootstrap(); /** * Callback function for the check_db_version method. Stores the read @@ -159,13 +151,23 @@ private: /** * Reads the current DB version. 
* @param is_federation_slave + * @param local_bs boostrap local DB tables + * @param shared_bs boostrap shared DB tables * * @return 0 on success, - * -1 if there is a version mismatch, - * -2 if the DB needs a bootstrap from the master - * -3 if the DB needs a bootstrap from the slave + * -1 if there is a version mismatch, or replica config error. */ - int check_db_version(bool is_federation_slave); + int check_db_version(bool is_slave, bool& local_bs, bool& shared_bs); + + /** + * check_db_version to check versioning + * @param table name of the DB table + * @param version target DB version + * @return 0 success, -1 upgrade needed, -2 bootstrap needed + */ + int check_db_version(const string& table, + const string& version, + string& error); }; #endif //SYSTEM_DB_H diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index bbf93f9905..ae7095fcb3 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -300,70 +300,68 @@ void Nebula::start(bool bootstrap_only) // Prepare the SystemDB and check versions // --------------------------------------------------------------------- + bool local_bootstrap; + bool shared_bootstrap; + NebulaLog::log("ONE",Log::INFO,"Checking database version."); system_db = new SystemDB(db); - rc = system_db->check_db_version(is_federation_slave()); - + rc = system_db->check_db_version(is_federation_slave(), + local_bootstrap, + shared_bootstrap); if( rc == -1 ) { - throw runtime_error("Database version mismatch."); + throw runtime_error("Database version mismatch. Check oned.log."); } - if( is_federation_slave() && rc == -2 ) + rc = 0; + + if (local_bootstrap) { - string error_str = - "Either the database was not bootstrapped by the " - "federation master, or the replication was " - "not configured."; - - NebulaLog::log("ONE",Log::ERROR,error_str); - - throw runtime_error(error_str); - } - - if( rc == -2 || rc == -3 ) - { - rc = 0; - NebulaLog::log("ONE",Log::INFO, - "Bootstrapping OpenNebula database."); + "Bootstrapping OpenNebula database, stage 1."); rc += VirtualMachinePool::bootstrap(db); rc += HostPool::bootstrap(db); rc += VirtualNetworkPool::bootstrap(db); - rc += GroupPool::bootstrap(db); - rc += UserPool::bootstrap(db); rc += ImagePool::bootstrap(db); rc += VMTemplatePool::bootstrap(db); - rc += AclManager::bootstrap(db); rc += DatastorePool::bootstrap(db); rc += ClusterPool::bootstrap(db); rc += DocumentPool::bootstrap(db); + + // Create the system tables only if bootstrap went well + if (rc == 0) + { + rc += system_db->local_bootstrap(); + } + } + + if (shared_bootstrap) + { + NebulaLog::log("ONE",Log::INFO, + "Bootstrapping OpenNebula database, stage 2."); + + rc += GroupPool::bootstrap(db); + rc += UserPool::bootstrap(db); + rc += AclManager::bootstrap(db); rc += ZonePool::bootstrap(db); // Create the system tables only if bootstrap went well if ( rc == 0 ) { - if (is_federation_slave()) - { - rc += system_db->slave_bootstrap(); - } - else - { - rc += system_db->bootstrap(); - } + rc += system_db->shared_bootstrap(); } // Insert default system attributes rc += default_user_quota.insert(); rc += default_group_quota.insert(); + } - if ( rc != 0 ) - { - throw runtime_error("Error bootstrapping database."); - } + if ( rc != 0 ) + { + throw runtime_error("Error bootstrapping database."); } } catch (exception&) diff --git a/src/nebula/SystemDB.cc b/src/nebula/SystemDB.cc index 545ac6558a..7004f0e89a 100644 --- a/src/nebula/SystemDB.cc +++ b/src/nebula/SystemDB.cc @@ -30,19 +30,19 @@ const char * SystemDB::pc_bootstrap = "CREATE TABLE 
pool_control " "(tablename VARCHAR(32) PRIMARY KEY, last_oid BIGINT UNSIGNED)"; -// DB versioning table -const char * SystemDB::ver_table = "db_versioning"; -const char * SystemDB::ver_names = "oid, version, timestamp, comment"; +// DB versioning table, shared (federation) tables +const char * SystemDB::shared_ver_table = "db_versioning"; +const char * SystemDB::shared_ver_names = "oid, version, timestamp, comment"; -const char * SystemDB::ver_bootstrap = "CREATE TABLE db_versioning " +const char * SystemDB::shared_ver_bootstrap = "CREATE TABLE db_versioning " "(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, " "comment VARCHAR(256))"; -// DB slave versioning table -const char * SystemDB::slave_ver_table = "slave_db_versioning"; -const char * SystemDB::slave_ver_names = "oid, version, timestamp, comment, is_slave"; +// DB versioning table, local tables +const char * SystemDB::local_ver_table = "local_db_versioning"; +const char * SystemDB::local_ver_names = "oid, version, timestamp, comment, is_slave"; -const char * SystemDB::slave_ver_bootstrap = "CREATE TABLE slave_db_versioning " +const char * SystemDB::local_ver_bootstrap = "CREATE TABLE local_db_versioning " "(oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, " "comment VARCHAR(256), is_slave BOOLEAN)"; @@ -56,10 +56,34 @@ const char * SystemDB::sys_bootstrap = "CREATE TABLE IF NOT EXISTS" /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int SystemDB::bootstrap(bool do_master) +int SystemDB::shared_bootstrap() { - int rc; - ostringstream oss; + int rc; + ostringstream oss; + + // --------------------------------------------------------------------- + // db versioning, version of OpenNebula. + // --------------------------------------------------------------------- + oss.str(shared_ver_bootstrap); + rc = db->exec(oss); + + oss.str(""); + oss << "INSERT INTO " << shared_ver_table << " (" << shared_ver_names << ") " + << "VALUES (0, '" << Nebula::shared_db_version() << "', " << time(0) + << ", '" << Nebula::version() << " daemon bootstrap')"; + + rc += db->exec(oss); + + return rc; +}; + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int SystemDB::local_bootstrap() +{ + int rc; + ostringstream oss; // ------------------------------------------------------------------------ // pool control, tracks the last ID's assigned to objects @@ -67,32 +91,16 @@ int SystemDB::bootstrap(bool do_master) oss.str(pc_bootstrap); rc = db->exec(oss); - if (do_master) - { - // --------------------------------------------------------------------- - // db versioning, version of OpenNebula. - // --------------------------------------------------------------------- - oss.str(ver_bootstrap); - rc += db->exec(oss); - - oss.str(""); - oss << "INSERT INTO " << ver_table << " (" << ver_names << ") " - << "VALUES (0, '" << Nebula::db_version() << "', " << time(0) - << ", '" << Nebula::version() << " daemon bootstrap')"; - - rc += db->exec(oss); - } - // ------------------------------------------------------------------------ - // slave db versioning, version of tables that are not replicated in a + // local db versioning, version of tables that are not replicated in a // slave OpenNebula. 
// ------------------------------------------------------------------------ - oss.str(slave_ver_bootstrap); + oss.str(local_ver_bootstrap); rc += db->exec(oss); oss.str(""); - oss << "INSERT INTO " << slave_ver_table << " (" << slave_ver_names << ") " - << "VALUES (0, '" << Nebula::slave_db_version() << "', " << time(0) + oss << "INSERT INTO " << local_ver_table << " (" << local_ver_names << ") " + << "VALUES (0, '" << Nebula::local_db_version() << "', " << time(0) << ", '" << Nebula::version() << " daemon bootstrap', " << Nebula::instance().is_federation_slave() << ")"; @@ -105,7 +113,7 @@ int SystemDB::bootstrap(bool do_master) rc += db->exec(oss); return rc; -} +}; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -128,104 +136,128 @@ int SystemDB::select_cb(void *_loaded_db_version, int num, char **values, /* -------------------------------------------------------------------------- */ -int SystemDB::check_db_version(bool is_federation_slave) +int SystemDB::check_db_version(const string& table, + const string& version, + string& error) { - int rc; - ostringstream oss; + ostringstream oss; + string db_version; - string loaded_db_version = ""; + error.clear(); - // Try to read latest version - set_callback( static_cast(&SystemDB::select_cb), - static_cast(&loaded_db_version) ); + set_callback(static_cast(&SystemDB::select_cb), + static_cast(&db_version)); - oss << "SELECT version FROM " << ver_table - << " WHERE oid=(SELECT MAX(oid) FROM " << ver_table << ")"; + oss << "SELECT version FROM " << table <<" WHERE oid=(SELECT MAX(oid) FROM " + << table << ")"; - db->exec(oss, this, true); + int rc = db->exec(oss, this, true); - oss.str(""); unset_callback(); - if( loaded_db_version == "" ) + if( rc != 0 || db_version.empty() )//DB needs bootstrap or replica config. { - // Table user_pool is present for all OpenNebula versions, and it - // always contains at least the oneadmin user. - oss << "SELECT MAX(oid) FROM user_pool"; - rc = db->exec(oss, 0, true); - - oss.str(""); - - if( rc != 0 ) // Database needs bootstrap - { - return -2; - } + return -2; } - if( Nebula::db_version() != loaded_db_version ) - { - if (!is_federation_slave) - { - oss << "Database version mismatch. " - << "Installed " << Nebula::version() << " uses DB version '" - << Nebula::db_version() << "', and existing DB version is '" - << loaded_db_version << "'."; - } - else - { - oss << "Database version mismatch. " - << "Installed slave " << Nebula::version() << " uses DB version '" - << Nebula::db_version() << "', and existing master DB version is '" - << loaded_db_version << "'."; - } + oss.str(""); + + if(version != db_version)//DB needs upgrade + { + oss << "Database version mismatch ( " << table << "). 
" + << "Installed " << Nebula::version() << " needs DB version '" + << version << "', and existing DB version is '"<< db_version << "'."; + + error = oss.str(); - NebulaLog::log("ONE",Log::ERROR,oss); return -1; } - loaded_db_version = ""; + oss << "oned is using version " << version << " for " << table; - // Try to read latest version from the slave db version table - set_callback( static_cast(&SystemDB::select_cb), - static_cast(&loaded_db_version) ); + NebulaLog::log("ONE", Log::INFO, oss); - oss << "SELECT version FROM " << slave_ver_table - << " WHERE oid=(SELECT MAX(oid) FROM " << slave_ver_table << ")"; + return 0; +}; - db->exec(oss, this, true); +/* -------------------------------------------------------------------------- */ - oss.str(""); - unset_callback(); +int SystemDB::check_db_version(bool is_slave, bool &local_bs, bool &shared_bs) +{ + int rc; + string error; - if( loaded_db_version == "" ) + local_bs = false; + shared_bs = false; + + /* ---------------------------------------------------------------------- */ + /* Check DB version for local tables */ + /* ---------------------------------------------------------------------- */ + + rc = check_db_version(local_ver_table, Nebula::local_db_version(), error); + + switch(rc) { - // Database needs bootstrap only for the slave tables - return -3; + case 0:// All ok continue + break; + + case -1:// Version missmatch (same for master/slave/standalone) + NebulaLog::log("ONE", Log::ERROR, error); + NebulaLog::log("ONE", Log::ERROR, "Use onedb to upgrade DB."); + return -1; + + case -2: //Cannot access DB table or empty, bootstrap + local_bs = true; + break; + + default: + break; } - if( Nebula::slave_db_version() != loaded_db_version ) - { - if (!is_federation_slave) - { - oss << "Database version mismatch. " - << "Installed " << Nebula::version() << " uses DB version '" - << Nebula::slave_db_version() << "', and existing DB version is '" - << loaded_db_version << "'."; - } - else - { - oss << "Database version mismatch. " - << "Installed slave " << Nebula::version() << " uses DB version '" - << Nebula::slave_db_version() << "', and existing slave DB version is '" - << loaded_db_version << "'."; - } + /* ---------------------------------------------------------------------- */ + /* Check DB version for shared (federation) tables */ + /* ---------------------------------------------------------------------- */ - NebulaLog::log("ONE",Log::ERROR,oss); - return -1; + rc = check_db_version(shared_ver_table, Nebula::shared_db_version(), error); + + switch(rc) + { + case 0:// All ok continue + break; + + case -1:// Version missmatch + NebulaLog::log("ONE", Log::ERROR, error); + + if (is_slave) + { + NebulaLog::log("ONE", Log::ERROR, + "Cannot join federation, oned master needs upgrade."); + } + else + { + NebulaLog::log("ONE", Log::ERROR, "Use onedb to upgrade DB."); + } + + return -1; + + case -2: //Cannot access DB table or empty, bootstrap (only master/standalone) + if (is_slave) + { + NebulaLog::log("ONE", Log::ERROR, "Cannot access shared DB" + " tables. 
Check DB replica configuration."); + + return -1; + } + + shared_bs = true; + break; + + default: + break; } return 0; -} +}; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ From 7f409c7b7c14e2ded91a6ea040c940861cc3773e Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 24 Feb 2014 15:28:29 +0100 Subject: [PATCH 51/80] bug #2546: fix bug with remotes rsync copy --- src/cli/onehost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/onehost b/src/cli/onehost index 7813046590..8e4d5de3e8 100755 --- a/src/cli/onehost +++ b/src/cli/onehost @@ -20,7 +20,7 @@ ONE_LOCATION=ENV["ONE_LOCATION"] if !ONE_LOCATION RUBY_LIB_LOCATION="/usr/lib/one/ruby" - REMOTES_LOCATION="/var/lib/one/remotes" + REMOTES_LOCATION="/var/lib/one/remotes/" else RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" REMOTES_LOCATION=ONE_LOCATION+"/var/remotes/" From 16056fcce16f7ce4dea29d2b43dc0bc3c91a4f6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Mon, 24 Feb 2014 18:06:23 +0100 Subject: [PATCH 52/80] Feature #2743: Make the migrator aware of the new local_db_versioning table --- src/onedb/4.4.1_to_4.5.80.rb | 4 ++ src/onedb/fsck.rb | 21 ++++++- src/onedb/import_slave.rb | 31 ++++++++-- src/onedb/onedb.rb | 108 +++++++++++++++++++++-------------- src/onedb/onedb_backend.rb | 33 ++++++++--- 5 files changed, 138 insertions(+), 59 deletions(-) diff --git a/src/onedb/4.4.1_to_4.5.80.rb b/src/onedb/4.4.1_to_4.5.80.rb index 6baa114b75..4feb260ff2 100644 --- a/src/onedb/4.4.1_to_4.5.80.rb +++ b/src/onedb/4.4.1_to_4.5.80.rb @@ -121,6 +121,10 @@ module Migrator @db.run "INSERT INTO pool_control VALUES('zone_pool',99);" + # New versioning table + @db.run "CREATE TABLE local_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256), is_slave BOOLEAN);" + @db.run "INSERT INTO local_db_versioning VALUES(0,'#{db_version()}',#{Time.now.to_i},'Database migrated from 4.4.1 to 4.5.80 (OpenNebula 4.5.80) by onedb command.',0);" + return true end diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index 00a8ee8ede..07d1b533e3 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -22,10 +22,23 @@ require 'set' require 'nokogiri' module OneDBFsck - VERSION = "4.5.80" + VERSION = "4.5.0" + LOCAL_VERSION = "4.5.0" - def db_version - VERSION + def check_db_version() + db_version = read_db_version() + + if ( db_version[:version] != VERSION || + db_version[:local_version] != LOCAL_VERSION ) + + raise <<-EOT +Version mismatch: fsck file is for version +Shared: #{VERSION}, Local: #{LOCAL_VERSION} + +Current database is version +Shared: #{db_version[:version]}, Local: #{db_version[:local_version]} +EOT + end end def one_version @@ -36,6 +49,8 @@ module OneDBFsck def fsck + # TODO: different behaviour for slave/master database + ######################################################################## # Acl ######################################################################## diff --git a/src/onedb/import_slave.rb b/src/onedb/import_slave.rb index a6cbb603b5..5f01d80fea 100644 --- a/src/onedb/import_slave.rb +++ b/src/onedb/import_slave.rb @@ -31,9 +31,32 @@ include OpenNebula module OneDBImportSlave VERSION = "4.5.0" + LOCAL_VERSION = "4.5.0" - def db_version - VERSION + def check_db_version(master_db_version, slave_db_version) + if ( master_db_version[:version] != VERSION || + master_db_version[:local_version] != LOCAL_VERSION ) + + raise <<-EOT 
+Version mismatch: import slave file is for version +Shared: #{VERSION}, Local: #{LOCAL_VERSION} + +Current master database is version +Shared: #{master_db_version[:version]}, Local: #{master_db_version[:local_version]} +EOT + elsif ( slave_db_version[:version] != VERSION || + slave_db_version[:local_version] != LOCAL_VERSION ) + + raise <<-EOT +Version mismatch: import slave file is for version +Shared: #{VERSION}, Local: #{LOCAL_VERSION} + +Current slave database is version +Shared: #{master_db_version[:version]}, Local: #{master_db_version[:local_version]} +EOT + elsif master_db_version[:is_slave] + raise "Master database is an OpenNebula federation slave" + end end def one_version @@ -538,8 +561,8 @@ EOT # Init slave_db_versioning table ######################################################################## - @slave_db.run "CREATE TABLE slave_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256));" - @slave_db.run "INSERT INTO slave_db_versioning (oid, version, timestamp, comment) VALUES (0, '#{VERSION}', #{Time.now.to_i}, 'onedb import tool');" + @slave_db.run "CREATE TABLE local_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256), is_slave BOOLEAN);" + @slave_db.run "INSERT INTO local_db_versioning VALUES(0,'#{LOCAL_VERSION}',#{Time.now.to_i},'onedb import tool',1);" @slave_db.run "DROP TABLE old_document_pool;" @slave_db.run "DROP TABLE old_image_pool;" diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index fdef7f41cb..b8e686dce5 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -94,16 +94,32 @@ class OneDB end def version(ops) - version, timestamp, comment = @backend.read_db_version + ret = @backend.read_db_version if(ops[:verbose]) - puts "Version: #{version}" + puts "Shared tables version: #{ret[:version]}" - time = version == "2.0" ? Time.now : Time.at(timestamp) + time = ret[:version] == "2.0" ? Time.now : Time.at(ret[:timestamp]) puts "Timestamp: #{time.strftime("%m/%d %H:%M:%S")}" - puts "Comment: #{comment}" + puts "Comment: #{ret[:comment]}" + + if ret[:local_version] + puts + puts "Local tables version: #{ret[:local_version]}" + + time = Time.at(ret[:local_timestamp]) + puts "Timestamp: #{time.strftime("%m/%d %H:%M:%S")}" + puts "Comment: #{ret[:local_comment]}" + + if ret[:is_slave] + puts + puts "This database is a federation slave" + end + end + else - puts version + puts "Shared: #{ret[:version]}" + puts "Local: #{ret[:version]}" end return 0 @@ -117,15 +133,17 @@ class OneDB # max_version is ignored for now, as this is the first onedb release. 
# May be used in next releases def upgrade(max_version, ops) - version, timestamp, comment = @backend.read_db_version + ret = @backend.read_db_version if ops[:verbose] - puts "Version read:" - puts "#{version} : #{comment}" + pretty_print_db_version(ret) + puts "" end - matches = Dir.glob("#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb") + # TODO: different upgrade path for slave/master database tables + + matches = Dir.glob("#{RUBY_LIB_LOCATION}/onedb/#{ret[:version]}_to_*.rb") if ( matches.size > 0 ) # At least one upgrade will be executed, make DB backup @@ -141,7 +159,7 @@ class OneDB while ( matches.size > 0 ) if ( matches.size > 1 ) raise "There are more than one file that match \ - \"#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb\"" + \"#{RUBY_LIB_LOCATION}/onedb/#{ret[:version]}_to_*.rb\"" end file = matches[0] @@ -157,7 +175,7 @@ class OneDB time1 = Time.now if !result - raise "Error while upgrading from #{version} to " << + raise "Error while upgrading from #{ret[:version]} to " << " #{@backend.db_version}" end @@ -170,9 +188,9 @@ class OneDB # Modify db_versioning table if result != nil - @backend.update_db_version(version) + @backend.update_db_version(ret[:version]) else - puts "Database already uses version #{version}" + puts "Database already uses version #{ret[:version]}" end timeb = Time.now @@ -196,11 +214,10 @@ class OneDB end def fsck(ops) - version, timestamp, comment = @backend.read_db_version + ret = @backend.read_db_version if ops[:verbose] - puts "Version read:" - puts "#{version} : #{comment}" + pretty_print_db_version(ret) puts "" end @@ -213,10 +230,7 @@ class OneDB load(file) @backend.extend OneDBFsck - if ( version != @backend.db_version ) - raise "Version mismatch: fsck file is for version "<< - "#{@backend.db_version}, current database version is #{version}" - end + @backend.check_db_version() # FSCK will be executed, make DB backup backup(ops[:backup], ops) @@ -229,7 +243,7 @@ class OneDB result = @backend.fsck if !result - raise "Error running fsck version #{version}" + raise "Error running fsck version #{ret[:version]}" end puts " > Done" if ops[:verbose] @@ -245,7 +259,7 @@ class OneDB rescue Exception => e puts e.message - puts "Error running fsck version #{version}" + puts "Error running fsck version #{ret[:version]}" puts "The database will be restored" ops[:force] = true @@ -277,17 +291,17 @@ class OneDB :db_name => ops[:slave_db_name] ) - version, timestamp, comment = @backend.read_db_version + db_version = @backend.read_db_version - slave_version, slave_timestamp, slave_comment = - slave_backend.read_db_version + slave_db_version = slave_backend.read_db_version if ops[:verbose] - puts "Master version read:" - puts "#{version} : #{comment}" + puts "Master database information:" + pretty_print_db_version(db_version) puts "" - puts "Slave version read:" - puts "#{slave_version} : #{slave_comment}" + puts "" + puts "Slave database information:" + pretty_print_db_version(slave_db_version) puts "" end @@ -300,19 +314,7 @@ class OneDB load(file) @backend.extend OneDBImportSlave - if ( version != @backend.db_version ) - raise "Version mismatch: import slave file is for version "<< - "#{@backend.db_version}, current master database version is #{version}" - end - - if ( slave_version != @backend.db_version ) - raise "Version mismatch: import slave file is for version "<< - "#{@backend.db_version}, current slave database version is #{version}" - end - - # Import will be executed, make DB backup - backup(ops[:backup], ops) - backup(ops[:"slave-backup"], ops, 
slave_backend) + @backend.check_db_version(db_version, slave_db_version) puts <<-EOT Before running this tool, it is required to create a new Zone in the @@ -364,6 +366,10 @@ is preserved. merge_groups = input == "Y" + # Import will be executed, make DB backup + backup(ops[:backup], ops) + backup(ops[:"slave-backup"], ops, slave_backend) + begin puts " > Running slave import" if ops[:verbose] @@ -371,7 +377,7 @@ is preserved. merge_groups, zone_id) if !result - raise "Error running slave import version #{version}" + raise "Error running slave import" end puts " > Done" if ops[:verbose] @@ -381,7 +387,7 @@ is preserved. rescue Exception => e puts e.message - puts "Error running slave import version #{version}" + puts "Error running slave import" puts "The databases will be restored" ops[:force] = true @@ -404,4 +410,18 @@ is preserved. raise "First stop OpenNebula. Lock file found: #{LOCK_FILE}" end end + + def pretty_print_db_version(db_version) + puts "Version read:" + puts "Shared tables #{db_version[:version]} : #{db_version[:comment]}" + + if db_version[:local_version] + puts "Local tables #{db_version[:local_version]} : #{db_version[:local_comment]}" + end + + if db_version[:is_slave] + puts + puts "This database is a federation slave" + end + end end diff --git a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index 315251597e..ca7b1a7519 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -28,19 +28,36 @@ class OneDBBacKEnd def read_db_version connect_db + ret = {} + begin - version = "2.0" - timestamp = 0 - comment = "" + ret[:version] = "2.0" + ret[:timestamp] = 0 + ret[:comment] = "" @db.fetch("SELECT version, timestamp, comment FROM db_versioning " + "WHERE oid=(SELECT MAX(oid) FROM db_versioning)") do |row| - version = row[:version] - timestamp = row[:timestamp] - comment = row[:comment] + ret[:version] = row[:version] + ret[:timestamp] = row[:timestamp] + ret[:comment] = row[:comment] end - return [version, timestamp, comment] + begin + @db.fetch("SELECT version, timestamp, comment, is_slave FROM "+ + "local_db_versioning WHERE oid=(SELECT MAX(oid) "+ + "FROM local_db_versioning)") do |row| + ret[:local_version] = row[:version] + ret[:local_timestamp] = row[:timestamp] + ret[:local_comment] = row[:comment] + ret[:is_slave] = row[:is_slave] + end + rescue Exception => e + if e.class == Sequel::DatabaseConnectionError + raise e + end + end + + return ret rescue Exception => e if e.class == Sequel::DatabaseConnectionError @@ -62,7 +79,7 @@ class OneDBBacKEnd comment = "Could not read any previous db_versioning data, " << "assuming it is an OpenNebula 2.0 or 2.2 DB." 
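
For reference, the hash assembled above and returned in place of the old
[version, timestamp, comment] triple has the following shape (values below are
illustrative only, not taken from a real deployment):

    # Illustrative example of the new return value of read_db_version.
    # :version/:timestamp/:comment describe the shared (federation) tables,
    # the :local_* keys describe the local tables, and :is_slave is read
    # from the local_db_versioning table.
    {
        :version         => "4.5.0",
        :timestamp       => 1393862400,
        :comment         => "Database migrated ... by onedb command.",
        :local_version   => "4.5.0",
        :local_timestamp => 1393862400,
        :local_comment   => "onedb import tool",
        :is_slave        => true
    }
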
- return [version, timestamp, comment] + return ret end end From 7b972c09da6e4c65a0ccf8bef028b93f18ae8c64 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 26 Feb 2014 12:25:02 +0100 Subject: [PATCH 53/80] bug #2744: fix bug generating fingerprint for econe keypair --- src/cloud/ec2/lib/keypair.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cloud/ec2/lib/keypair.rb b/src/cloud/ec2/lib/keypair.rb index 464b5d8904..47294e93b4 100644 --- a/src/cloud/ec2/lib/keypair.rb +++ b/src/cloud/ec2/lib/keypair.rb @@ -88,7 +88,7 @@ module Keypair erb_private_key = rsa_kp erb_public_key = rsa_kp.public_key - erb_key_fingerprint = Digest::MD5.hexdigest(rsa_kp.to_der) + erb_key_fingerprint = Digest::MD5.hexdigest(rsa_kp.to_blob) erb_key_fingerprint.gsub!(/(.{2})(?=.)/, '\1:\2') erb_ssh_public_key = erb_public_key.ssh_type << From c58cc19d01c9225ab0f9b166b7b0122267717e72 Mon Sep 17 00:00:00 2001 From: Tino Vazquez Date: Wed, 26 Feb 2014 12:59:03 +0100 Subject: [PATCH 54/80] Feature #2696: Add generic Template for Groups --- include/Group.h | 16 +++++++++++++++- include/RequestManagerUpdateTemplate.h | 18 ++++++++++++++++++ include/UserTemplate.h | 2 +- src/cli/one_helper/onegroup_helper.rb | 4 ++++ src/cli/onegroup | 18 ++++++++++++++++++ src/group/Group.cc | 19 +++++++++++++++++-- src/oca/ruby/opennebula/group.rb | 13 +++++++++++++ src/rm/RequestManager.cc | 13 +++++++++---- 8 files changed, 95 insertions(+), 8 deletions(-) diff --git a/include/Group.h b/include/Group.h index 4e0e26de21..febd6edc7c 100644 --- a/include/Group.h +++ b/include/Group.h @@ -21,6 +21,7 @@ #include "ObjectCollection.h" #include "User.h" #include "QuotasSQL.h" +#include "Template.h" using namespace std; @@ -109,6 +110,14 @@ public: return quota.update(oid, db); }; + /** + * Factory method for Group templates + */ + Template * get_new_template() const + { + return new Template; + } + private: // ------------------------------------------------------------------------- @@ -128,9 +137,14 @@ private: { // Allow users in this group to see it group_u = 1; + + obj_template = new Template; }; - virtual ~Group(){}; + virtual ~Group() + { + delete obj_template; + }; // ************************************************************************* // Attributes (Private) diff --git a/include/RequestManagerUpdateTemplate.h b/include/RequestManagerUpdateTemplate.h index c82ac89a16..669f20d266 100644 --- a/include/RequestManagerUpdateTemplate.h +++ b/include/RequestManagerUpdateTemplate.h @@ -239,6 +239,24 @@ public: ~ZoneUpdateTemplate(){}; }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class GroupUpdateTemplate : public RequestManagerUpdateTemplate +{ +public: + GroupUpdateTemplate(): + RequestManagerUpdateTemplate("GroupUpdateTemplate", + "Updates a Group template") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_gpool(); + auth_object = PoolObjectSQL::GROUP; + }; + + ~GroupUpdateTemplate(){}; +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/UserTemplate.h b/include/UserTemplate.h index 2eb92ad639..26d7151f89 100644 --- a/include/UserTemplate.h +++ b/include/UserTemplate.h @@ -35,4 +35,4 @@ public: /* -------------------------------------------------------------------------- 
*/ /* -------------------------------------------------------------------------- */ -#endif /*IMAGE_TEMPLATE_H_*/ +#endif /*USER_TEMPLATE_H_*/ diff --git a/src/cli/one_helper/onegroup_helper.rb b/src/cli/one_helper/onegroup_helper.rb index d60576f5d3..045c24a960 100644 --- a/src/cli/one_helper/onegroup_helper.rb +++ b/src/cli/one_helper/onegroup_helper.rb @@ -213,6 +213,10 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper puts str % ["NAME", group.name] puts + CLIHelper.print_header(str_h1 % "GROUP TEMPLATE",false) + puts group.template_str + puts + CLIHelper.print_header(str_h1 % "USERS", false) CLIHelper.print_header("%-15s" % ["ID"]) group.user_ids.each do |uid| diff --git a/src/cli/onegroup b/src/cli/onegroup index 746e41fd47..8a3d0ca587 100755 --- a/src/cli/onegroup +++ b/src/cli/onegroup @@ -135,6 +135,24 @@ cmd=CommandParser::CmdParser.new(ARGV) do end end + update_desc = <<-EOT.unindent + Update the template contents. If a path is not provided the editor will + be launched to modify the current content. + EOT + + command :update, update_desc, :groupid, [:file, nil], + :options=>OpenNebulaHelper::APPEND do + helper.perform_action(args[0],options,"modified") do |obj| + if options[:append] + str = OpenNebulaHelper.append_template(args[0], obj, args[1]) + else + str = OpenNebulaHelper.update_template(args[0], obj, args[1]) + end + + obj.update(str, options[:append]) + end + end + delete_desc = <<-EOT.unindent Deletes the given Group EOT diff --git a/src/group/Group.cc b/src/group/Group.cc index 7af368375e..afa9ab0df3 100644 --- a/src/group/Group.cc +++ b/src/group/Group.cc @@ -214,6 +214,7 @@ string& Group::to_xml_extended(string& xml, bool extended) const { ostringstream oss; string collection_xml; + string template_xml; set >::const_iterator it; @@ -221,8 +222,9 @@ string& Group::to_xml_extended(string& xml, bool extended) const oss << "" << - "" << oid << "" << - "" << name << "" << + "" << oid << "" << + "" << name << "" << + obj_template->to_xml(template_xml) << collection_xml; for (it = providers.begin(); it != providers.end(); it++) @@ -285,6 +287,19 @@ int Group::from_xml(const string& xml) ObjectXML::free_nodes(content); content.clear(); + // Get associated metadata for the group + ObjectXML::get_nodes("/GROUP/TEMPLATE", content); + + if (content.empty()) + { + return -1; + } + + rc += obj_template->from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + content.clear(); + // Set of resource providers ObjectXML::get_nodes("/GROUP/RESOURCE_PROVIDER", content); diff --git a/src/oca/ruby/opennebula/group.rb b/src/oca/ruby/opennebula/group.rb index 84caa911ac..770a13c608 100644 --- a/src/oca/ruby/opennebula/group.rb +++ b/src/oca/ruby/opennebula/group.rb @@ -26,6 +26,7 @@ module OpenNebula GROUP_METHODS = { :info => "group.info", :allocate => "group.allocate", + :update => "group.update", :delete => "group.delete", :quota => "group.quota", :add_provider => "group.addprovider", @@ -222,6 +223,18 @@ module OpenNebula super(GROUP_METHODS[:allocate], groupname) end + # Replaces the template contents + # + # @param new_template [String] New template contents + # @param append [true, false] True to append new attributes instead of + # replace the whole template + # + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def update(new_template=nil, append=false) + super(GROUP_METHODS[:update], new_template, append ? 
1 : 0) + end + # Deletes the Group def delete() super(GROUP_METHODS[:delete]) diff --git a/src/rm/RequestManager.cc b/src/rm/RequestManager.cc index 54d57f33d1..83eb937f23 100644 --- a/src/rm/RequestManager.cc +++ b/src/rm/RequestManager.cc @@ -437,6 +437,7 @@ void RequestManager::register_xml_methods() /* Group related methods */ xmlrpc_c::method * group_allocate_pt; + xmlrpc_c::method * group_update_pt; xmlrpc_c::method * group_delete_pt; xmlrpc_c::method * group_add_provider_pt; xmlrpc_c::method * group_del_provider_pt; @@ -447,6 +448,7 @@ void RequestManager::register_xml_methods() group_delete_pt = new RequestManagerProxy("one.group.delete"); group_add_provider_pt = new RequestManagerProxy("one.group.addprovider"); group_del_provider_pt = new RequestManagerProxy("one.group.delprovider"); + group_update_pt = new RequestManagerProxy("one.group.update"); } else { @@ -454,12 +456,14 @@ void RequestManager::register_xml_methods() group_delete_pt = new GroupDelete(); group_add_provider_pt = new GroupAddProvider(); group_del_provider_pt = new GroupDelProvider(); + group_update_pt = new GroupUpdateTemplate(); } xmlrpc_c::methodPtr group_allocate(group_allocate_pt); xmlrpc_c::methodPtr group_delete(group_delete_pt); xmlrpc_c::methodPtr group_add_provider(group_add_provider_pt); xmlrpc_c::methodPtr group_del_provider(group_del_provider_pt); + xmlrpc_c::methodPtr group_update(group_update_pt); xmlrpc_c::methodPtr group_info(new GroupInfo()); xmlrpc_c::methodPtr group_set_quota(new GroupSetQuota()); @@ -467,12 +471,13 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr group_get_default_quota(new GroupQuotaInfo()); xmlrpc_c::methodPtr group_set_default_quota(new GroupQuotaUpdate()); - RequestManagerRegistry.addMethod("one.group.allocate", group_allocate); - RequestManagerRegistry.addMethod("one.group.delete", group_delete); - RequestManagerRegistry.addMethod("one.group.info", group_info); - RequestManagerRegistry.addMethod("one.group.quota", group_set_quota); + RequestManagerRegistry.addMethod("one.group.allocate", group_allocate); + RequestManagerRegistry.addMethod("one.group.delete", group_delete); + RequestManagerRegistry.addMethod("one.group.info", group_info); + RequestManagerRegistry.addMethod("one.group.quota", group_set_quota); RequestManagerRegistry.addMethod("one.group.addprovider",group_add_provider); RequestManagerRegistry.addMethod("one.group.delprovider",group_del_provider); + RequestManagerRegistry.addMethod("one.group.update", group_update); RequestManagerRegistry.addMethod("one.grouppool.info", grouppool_info); From 7b0136523f6d455e912e2cba82961accd8732203 Mon Sep 17 00:00:00 2001 From: Tino Vazquez Date: Wed, 26 Feb 2014 17:41:08 +0100 Subject: [PATCH 55/80] Feature #2696: Sunstone support for Group template --- .../models/OpenNebulaJSON/GroupJSON.rb | 5 +++ src/sunstone/public/js/opennebula.js | 7 +++ src/sunstone/public/js/plugins/groups-tab.js | 43 ++++++++++++++++++- src/sunstone/public/js/plugins/images-tab.js | 5 --- 4 files changed, 54 insertions(+), 6 deletions(-) diff --git a/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb b/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb index eae06612ff..a5e7576902 100644 --- a/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/GroupJSON.rb @@ -37,6 +37,7 @@ module OpenNebulaJSON rc = case action_hash['perform'] when "chown" then self.chown(action_hash['params']) + when "update" then self.update(action_hash['params']) when "set_quota" then self.set_quota(action_hash['params']) when 
"add_provider" then self.add_provider(action_hash['params']) when "del_provider" then self.del_provider(action_hash['params']) @@ -51,6 +52,10 @@ module OpenNebulaJSON super(params['owner_id'].to_i) end + def update(params=Hash.new) + super(params['template_raw']) + end + def set_quota(params=Hash.new) quota_json = params['quotas'] quota_template = template_to_str(quota_json) diff --git a/src/sunstone/public/js/opennebula.js b/src/sunstone/public/js/opennebula.js index e5497c875c..1f70137a08 100644 --- a/src/sunstone/public/js/opennebula.js +++ b/src/sunstone/public/js/opennebula.js @@ -932,6 +932,13 @@ var OpenNebula = { } }); }, + "update": function(params){ + var action_obj = {"template_raw" : params.data.extra_param }; + OpenNebula.Action.simple_action(params, + OpenNebula.Group.resource, + "update", + action_obj); + }, "set_quota" : function(params){ var action_obj = { quotas : params.data.extra_param }; OpenNebula.Action.simple_action(params,OpenNebula.Group.resource,"set_quota",action_obj); diff --git a/src/sunstone/public/js/plugins/groups-tab.js b/src/sunstone/public/js/plugins/groups-tab.js index e8482dc7f6..2286957d92 100644 --- a/src/sunstone/public/js/plugins/groups-tab.js +++ b/src/sunstone/public/js/plugins/groups-tab.js @@ -445,6 +445,16 @@ var group_actions = { error: onError }, + "Group.update_template" : { + type: "single", + call: OpenNebula.Group.update, + callback: function(request) { + notifyMessage("Template updated correctly"); + Sunstone.runAction('Group.showinfo',request.request.data[0][0]); + }, + error: onError + }, + "Group.delete" : { type: "multiple", call : OpenNebula.Group.del, @@ -587,6 +597,7 @@ var group_buttons = { }; var group_info_panel = { + }; var groups_tab = { @@ -773,6 +784,36 @@ function fromJSONtoProvidersTable(group_info){ function updateGroupInfo(request,group){ var info = group.GROUP; + var info_tab = { + title: tr("Information"), + content: + '
\ +
\ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
'+tr("Group")+' - '+info.NAME+'
'+tr("ID")+''+info.ID+'
'+tr("Name")+''+info.NAME+'
\ +
\ +
' + + insert_extended_template_table(info.TEMPLATE, + "Group", + info.ID, + "Configuration & Tags") + + '
\ +
' + } + var default_group_quotas = Quotas.default_quotas(info.DEFAULT_GROUP_QUOTAS); var quotas_tab_html = '
' + Quotas.vms(info, default_group_quotas) + '
'; quotas_tab_html += '
' + Quotas.cpu(info, default_group_quotas) + '
'; @@ -813,7 +854,7 @@ function updateGroupInfo(request,group){
' }; - + Sunstone.updateInfoPanelTab("group_info_panel","group_info_tab",info_tab); Sunstone.updateInfoPanelTab("group_info_panel","group_quotas_tab",quotas_tab); Sunstone.updateInfoPanelTab("group_info_panel","group_providers_tab",providers_tab); Sunstone.popUpInfoPanel("group_info_panel", 'groups-tab'); diff --git a/src/sunstone/public/js/plugins/images-tab.js b/src/sunstone/public/js/plugins/images-tab.js index 36dc3a9726..716722f2a5 100644 --- a/src/sunstone/public/js/plugins/images-tab.js +++ b/src/sunstone/public/js/plugins/images-tab.js @@ -593,11 +593,6 @@ var image_buttons = { layout: "del", text: tr("Delete") }, - //"Image.help" : { - // type: "action", - // text: '?', - // alwaysActive: true - //} } var image_info_panel = { From 4cf97994a08b46378a85019603b1490d5078d53a Mon Sep 17 00:00:00 2001 From: Tino Vazquez Date: Wed, 26 Feb 2014 19:20:01 +0100 Subject: [PATCH 56/80] Feature #2696: Retrieve views from primary group template --- src/sunstone/models/SunstoneViews.rb | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/sunstone/models/SunstoneViews.rb b/src/sunstone/models/SunstoneViews.rb index 2326a9975d..1c4177de05 100644 --- a/src/sunstone/models/SunstoneViews.rb +++ b/src/sunstone/models/SunstoneViews.rb @@ -49,9 +49,19 @@ class SunstoneViews end def available_views(user_name, group_name) - available_views = @views_config['users'][user_name] if @views_config['users'] - available_views ||= @views_config['groups'][group_name] if @views_config['groups'] - available_views ||= @views_config['default'] + user = OpenNebula::User.new_with_id( + OpenNebula::User::SELF, + $cloud_auth.client(user_name)) + user.info + + group = OpenNebula::Group.new_with_id(user.gid, $cloud_auth.client(user_name)) + group.info + + if group["TEMPLATE/SUNSTONE_VIEWS"] + available_views = group["TEMPLATE/SUNSTONE_VIEWS"].split(",") + else + available_views = ['cloud'] + end return available_views end From 154279f24fdd0897c0c7ce6b017ae5279b6ec587 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Thu, 27 Feb 2014 12:43:50 +0100 Subject: [PATCH 57/80] bug #2762: Generate BRIDGE as "onebr" if vlan_id is defined (cherry picked from commit d1ea5db93c2683727f408d920bdae9c95308ef47) --- src/vnm/VirtualNetwork.cc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/vnm/VirtualNetwork.cc b/src/vnm/VirtualNetwork.cc index c880e39555..93555029c0 100644 --- a/src/vnm/VirtualNetwork.cc +++ b/src/vnm/VirtualNetwork.cc @@ -255,7 +255,16 @@ int VirtualNetwork::insert(SqlDB * db, string& error_str) { ostringstream oss; - oss << "onebr" << oid; + oss << "onebr"; + + if (!vlan_id.empty()) + { + oss << vlan_id; + } + else + { + oss << oid; + } bridge = oss.str(); } From 1f80bb3b75428ca53ffd64d179b9b3cf7db9dfa0 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Thu, 27 Feb 2014 13:05:27 +0100 Subject: [PATCH 58/80] bug #2762: Prevent collitions between vlanid and vnetid (cherry picked from commit ad1276927c8d6089f240a7bd8cf43f4ae7e57d96) --- src/vnm/VirtualNetwork.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vnm/VirtualNetwork.cc b/src/vnm/VirtualNetwork.cc index 93555029c0..07389e9317 100644 --- a/src/vnm/VirtualNetwork.cc +++ b/src/vnm/VirtualNetwork.cc @@ -259,7 +259,7 @@ int VirtualNetwork::insert(SqlDB * db, string& error_str) if (!vlan_id.empty()) { - oss << vlan_id; + oss << "." 
<< vlan_id; } else { From f1e3d105af4bbd10a46dcc9b07aa83840db0a19d Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 28 Feb 2014 10:55:15 +0100 Subject: [PATCH 59/80] feature #2371: parse cdata with ox parser --- src/oca/ruby/opennebula/xml_utils.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/src/oca/ruby/opennebula/xml_utils.rb b/src/oca/ruby/opennebula/xml_utils.rb index 70d488bf72..d4a83bdde3 100644 --- a/src/oca/ruby/opennebula/xml_utils.rb +++ b/src/oca/ruby/opennebula/xml_utils.rb @@ -110,6 +110,7 @@ module OpenNebula include ParsePoolBase alias :text :characters + alias :cdata :characters end end elsif NOKOGIRI From 15af612918908fbe13094a0c1415be8c5b006cca Mon Sep 17 00:00:00 2001 From: Tino Vazquez Date: Fri, 28 Feb 2014 16:40:39 +0100 Subject: [PATCH 60/80] Feature #2696: Improve Sunstone views assigments --- src/sunstone/models/SunstoneViews.rb | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/sunstone/models/SunstoneViews.rb b/src/sunstone/models/SunstoneViews.rb index 1c4177de05..66fa6700ab 100644 --- a/src/sunstone/models/SunstoneViews.rb +++ b/src/sunstone/models/SunstoneViews.rb @@ -57,12 +57,19 @@ class SunstoneViews group = OpenNebula::Group.new_with_id(user.gid, $cloud_auth.client(user_name)) group.info + available_views = Array.new if group["TEMPLATE/SUNSTONE_VIEWS"] available_views = group["TEMPLATE/SUNSTONE_VIEWS"].split(",") - else - available_views = ['cloud'] end + static_views = @views_config['users'][user_name] if @views_config['users'] + static_views ||= @views_config['groups'][group_name] if @views_config['groups'] + static_views ||= @views_config['default'] + + available_views.concat(static_views) + available_views.select!{|view_name| @views[view_name]} + available_views.uniq! + return available_views end From 34ac8ff03c37a10b7d362e0d6a48921220955980 Mon Sep 17 00:00:00 2001 From: Tino Vazquez Date: Fri, 28 Feb 2014 17:09:38 +0100 Subject: [PATCH 61/80] Bug #2721: Add error check in onegroup creation --- src/cli/one_helper/onegroup_helper.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cli/one_helper/onegroup_helper.rb b/src/cli/one_helper/onegroup_helper.rb index 045c24a960..a250d3f082 100644 --- a/src/cli/one_helper/onegroup_helper.rb +++ b/src/cli/one_helper/onegroup_helper.rb @@ -41,6 +41,10 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper exit_code , msg = group.create_default_acls + if OpenNebula.is_error?(exit_code) + return -1, exit_code.message + end + exit_code.to_i end From 0927a5b881815c438758a33ff4f4a48b99186eaf Mon Sep 17 00:00:00 2001 From: Tino Vazquez Date: Fri, 28 Feb 2014 18:35:56 +0100 Subject: [PATCH 62/80] Feature #2506: Change input to textarea to allow multiple lines --- src/sunstone/public/js/sunstone-util.js | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/src/sunstone/public/js/sunstone-util.js b/src/sunstone/public/js/sunstone-util.js index 52cbe24d68..e85f322c37 100644 --- a/src/sunstone/public/js/sunstone-util.js +++ b/src/sunstone/public/js/sunstone-util.js @@ -1810,7 +1810,7 @@ function insert_extended_template_table(template_json,resource_type,resource_id, \ \ \ - \ + \ \\ ' + fromJSONtoHTMLTable(template_json, resource_type, @@ -1854,19 +1854,6 @@ function insert_extended_template_table(template_json,resource_type,resource_id, } }); - // Capture the enter key - $('#new_value').live("keypress", function(e) { - var ev = e || window.event; - var key = ev.keyCode; - - if (key == 13) - { - //Get the button the user wants to have clicked - 
$('#button_add_value', $(this).parent().parent()).click(); - ev.preventDefault(); - } - }) - // Listener for single values // Listener for key,value pair remove action @@ -1901,7 +1888,7 @@ function insert_extended_template_table(template_json,resource_type,resource_id, var key_str=this.firstElementChild.id.substring(9,this.firstElementChild.id.length); var value_str = $("#value_td_input_"+key_str).text(); - input = $("#value_td_input_"+key_str).html(''); + input = $("#value_td_input_"+key_str).html(''); $('#input_edit_'+key_str).val(value_str); }); From 417cf910898c66e883f5ac3a871aa7cf68bc73cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Mon, 3 Mar 2014 14:46:34 +0100 Subject: [PATCH 63/80] Feature #2763: Add GROUP/TEMPLATE to migrator --- src/onedb/4.4.1_to_4.5.80.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/onedb/4.4.1_to_4.5.80.rb b/src/onedb/4.4.1_to_4.5.80.rb index 4feb260ff2..a6bd9be56a 100644 --- a/src/onedb/4.4.1_to_4.5.80.rb +++ b/src/onedb/4.4.1_to_4.5.80.rb @@ -83,6 +83,7 @@ module Migrator # GROUP/RESOURCE_PROVIDER is not needed # Move GROUP/QUOTA to group_quotas table + # Add GROUP/TEMPLATE @db.run "ALTER TABLE group_pool RENAME TO old_group_pool;" @db.run "CREATE TABLE group_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" @@ -95,6 +96,8 @@ module Migrator quotas_doc = extract_quotas(doc) + doc.root.add_child(doc.create_element("TEMPLATE")) + @db[:group_pool].insert( :oid => row[:oid], :name => row[:name], From 8dcec74c429d0623c41e2fad2de5ab39e1d1d84a Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 3 Mar 2014 16:46:05 +0100 Subject: [PATCH 64/80] Typo in oneacct help --- src/cli/one_helper/oneacct_helper.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cli/one_helper/oneacct_helper.rb b/src/cli/one_helper/oneacct_helper.rb index 572777bf70..55187c696d 100644 --- a/src/cli/one_helper/oneacct_helper.rb +++ b/src/cli/one_helper/oneacct_helper.rb @@ -87,7 +87,7 @@ class AcctHelper < OpenNebulaHelper::OneHelper :name => "json", :short => "-j", :large => "--json", - :description => "Show the resource in xml format" + :description => "Show the resource in json format" } SPLIT={ @@ -185,4 +185,4 @@ class AcctHelper < OpenNebulaHelper::OneHelper CLIHelper.scr_restore puts end -end \ No newline at end of file +end From e1cde0733d121f94e69f299aff5c1715e4cbef76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Mon, 3 Mar 2014 16:48:09 +0100 Subject: [PATCH 65/80] Feature #2745: Make onedb fsck aware of the slave and master databases --- src/onedb/fsck.rb | 54 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index 07d1b533e3..5fb7012365 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -49,8 +49,6 @@ EOT def fsck - # TODO: different behaviour for slave/master database - ######################################################################## # Acl ######################################################################## @@ -124,6 +122,7 @@ EOT @errors = 0 puts + db_version = read_db_version() ######################################################################## # pool_control @@ -133,6 +132,8 @@ EOT "network_pool", "template_pool", "vm_pool", "cluster_pool", "datastore_pool", "document_pool", "zone_pool"] + federated_tables = ["group_pool", "user_pool", "acl", "zone_pool"] + 
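
The rule applied by the hunks below can be condensed as: inconsistencies in
federated tables are only reported on a slave and must be repaired on the
master, while everything else is fixed locally. A sketch of that check, using
the variables defined above (the helper name is made up for illustration, the
real code inlines the condition in each hunk):

    # Sketch only: may fsck repair `table` in this database?
    def can_fix_locally?(db_version, federated_tables, table)
        !(db_version[:is_slave] && federated_tables.include?(table))
    end
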
tables.each do |table| max_oid = -1 @@ -160,7 +161,11 @@ EOT log_error("pool_control for table #{table} has last_oid #{control_oid}, but it is #{max_oid}") if control_oid != -1 - @db.run("UPDATE pool_control SET last_oid=#{max_oid} WHERE tablename='#{table}'") + if db_version[:is_slave] && federated_tables.include?(table) + log_error("^ Needs to be fixed in the master OpenNebula") + else + @db.run("UPDATE pool_control SET last_oid=#{max_oid} WHERE tablename='#{table}'") + end else @db[:pool_control].insert( :tablename => table, @@ -258,18 +263,23 @@ EOT end end - @db.transaction do - users_fix.each do |id, user| - @db[:user_pool].where(:oid => id).update( - :body => user[:body], - :gid => user[:gid]) + if db_version[:is_slave] + log_error("^ User errors need to be fixed in the master OpenNebula") + else + @db.transaction do + users_fix.each do |id, user| + @db[:user_pool].where(:oid => id).update( + :body => user[:body], + :gid => user[:gid]) + end end end log_time() - - @db.run "CREATE TABLE group_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + if !db_version[:is_slave] + @db.run "CREATE TABLE group_pool_new (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + end @db.transaction do @db.fetch("SELECT * from group_pool") do |row| @@ -296,14 +306,20 @@ EOT row[:body] = doc.to_s - # commit - @db[:group_pool_new].insert(row) + if db_version[:is_slave] + log_error("^ Group errors need to be fixed in the master OpenNebula") + else + # commit + @db[:group_pool_new].insert(row) + end end end - # Rename table - @db.run("DROP TABLE group_pool") - @db.run("ALTER TABLE group_pool_new RENAME TO group_pool") + if !db_version[:is_slave] + # Rename table + @db.run("DROP TABLE group_pool") + @db.run("ALTER TABLE group_pool_new RENAME TO group_pool") + end log_time() @@ -1275,6 +1291,8 @@ EOT # USER QUOTAS ######################################################################## + # This block is not needed for now +=begin @db.transaction do @db.fetch("SELECT oid FROM user_pool") do |row| found = false @@ -1290,7 +1308,7 @@ EOT end end end - +=end @db.run "ALTER TABLE user_quotas RENAME TO old_user_quotas;" @db.run "CREATE TABLE user_quotas (user_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" @@ -1321,6 +1339,8 @@ EOT # GROUP QUOTAS ######################################################################## + # This block is not needed for now +=begin @db.transaction do @db.fetch("SELECT oid FROM group_pool") do |row| found = false @@ -1336,7 +1356,7 @@ EOT end end end - +=end @db.run "ALTER TABLE group_quotas RENAME TO old_group_quotas;" @db.run "CREATE TABLE group_quotas (group_oid INTEGER PRIMARY KEY, body MEDIUMTEXT);" From e317c57a555506b5be61180361da4bfb9f10a3d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Mon, 3 Mar 2014 19:11:17 +0100 Subject: [PATCH 66/80] Feature #2743: onedb upgrade now has two set of migrator files, for shared and local tables --- install.sh | 85 +++++++++------- src/onedb/onedb.rb | 111 +++++++++++++-------- src/onedb/onedb_backend.rb | 44 +++++++- src/onedb/{ => shared}/2.0_to_2.9.80.rb | 0 src/onedb/{ => shared}/2.9.80_to_2.9.85.rb | 0 src/onedb/{ => shared}/2.9.85_to_2.9.90.rb | 0 src/onedb/{ => shared}/2.9.90_to_3.0.0.rb | 0 src/onedb/{ => shared}/3.0.0_to_3.1.0.rb | 0 src/onedb/{ => shared}/3.1.0_to_3.1.80.rb | 0 src/onedb/{ => 
shared}/3.1.80_to_3.2.0.rb | 0 src/onedb/{ => shared}/3.2.0_to_3.2.1.rb | 0 src/onedb/{ => shared}/3.2.1_to_3.3.0.rb | 0 src/onedb/{ => shared}/3.3.0_to_3.3.80.rb | 0 src/onedb/{ => shared}/3.3.80_to_3.4.0.rb | 0 src/onedb/{ => shared}/3.4.0_to_3.4.1.rb | 0 src/onedb/{ => shared}/3.4.1_to_3.5.80.rb | 0 src/onedb/{ => shared}/3.5.80_to_3.6.0.rb | 0 src/onedb/{ => shared}/3.6.0_to_3.7.80.rb | 0 src/onedb/{ => shared}/3.7.80_to_3.8.0.rb | 0 src/onedb/{ => shared}/3.8.0_to_3.8.1.rb | 0 src/onedb/{ => shared}/3.8.1_to_3.8.2.rb | 0 src/onedb/{ => shared}/3.8.2_to_3.8.3.rb | 0 src/onedb/{ => shared}/3.8.3_to_3.8.4.rb | 0 src/onedb/{ => shared}/3.8.4_to_3.8.5.rb | 0 src/onedb/{ => shared}/3.8.5_to_3.9.80.rb | 0 src/onedb/{ => shared}/3.9.80_to_3.9.90.rb | 0 src/onedb/{ => shared}/3.9.90_to_4.0.0.rb | 0 src/onedb/{ => shared}/4.0.0_to_4.0.1.rb | 0 src/onedb/{ => shared}/4.0.1_to_4.1.80.rb | 0 src/onedb/{ => shared}/4.1.80_to_4.2.0.rb | 0 src/onedb/{ => shared}/4.2.0_to_4.3.80.rb | 0 src/onedb/{ => shared}/4.3.80_to_4.3.85.rb | 0 src/onedb/{ => shared}/4.3.85_to_4.3.90.rb | 0 src/onedb/{ => shared}/4.3.90_to_4.4.0.rb | 0 src/onedb/{ => shared}/4.4.0_to_4.4.1.rb | 0 src/onedb/{ => shared}/4.4.1_to_4.5.80.rb | 0 36 files changed, 156 insertions(+), 84 deletions(-) rename src/onedb/{ => shared}/2.0_to_2.9.80.rb (100%) rename src/onedb/{ => shared}/2.9.80_to_2.9.85.rb (100%) rename src/onedb/{ => shared}/2.9.85_to_2.9.90.rb (100%) rename src/onedb/{ => shared}/2.9.90_to_3.0.0.rb (100%) rename src/onedb/{ => shared}/3.0.0_to_3.1.0.rb (100%) rename src/onedb/{ => shared}/3.1.0_to_3.1.80.rb (100%) rename src/onedb/{ => shared}/3.1.80_to_3.2.0.rb (100%) rename src/onedb/{ => shared}/3.2.0_to_3.2.1.rb (100%) rename src/onedb/{ => shared}/3.2.1_to_3.3.0.rb (100%) rename src/onedb/{ => shared}/3.3.0_to_3.3.80.rb (100%) rename src/onedb/{ => shared}/3.3.80_to_3.4.0.rb (100%) rename src/onedb/{ => shared}/3.4.0_to_3.4.1.rb (100%) rename src/onedb/{ => shared}/3.4.1_to_3.5.80.rb (100%) rename src/onedb/{ => shared}/3.5.80_to_3.6.0.rb (100%) rename src/onedb/{ => shared}/3.6.0_to_3.7.80.rb (100%) rename src/onedb/{ => shared}/3.7.80_to_3.8.0.rb (100%) rename src/onedb/{ => shared}/3.8.0_to_3.8.1.rb (100%) rename src/onedb/{ => shared}/3.8.1_to_3.8.2.rb (100%) rename src/onedb/{ => shared}/3.8.2_to_3.8.3.rb (100%) rename src/onedb/{ => shared}/3.8.3_to_3.8.4.rb (100%) rename src/onedb/{ => shared}/3.8.4_to_3.8.5.rb (100%) rename src/onedb/{ => shared}/3.8.5_to_3.9.80.rb (100%) rename src/onedb/{ => shared}/3.9.80_to_3.9.90.rb (100%) rename src/onedb/{ => shared}/3.9.90_to_4.0.0.rb (100%) rename src/onedb/{ => shared}/4.0.0_to_4.0.1.rb (100%) rename src/onedb/{ => shared}/4.0.1_to_4.1.80.rb (100%) rename src/onedb/{ => shared}/4.1.80_to_4.2.0.rb (100%) rename src/onedb/{ => shared}/4.2.0_to_4.3.80.rb (100%) rename src/onedb/{ => shared}/4.3.80_to_4.3.85.rb (100%) rename src/onedb/{ => shared}/4.3.85_to_4.3.90.rb (100%) rename src/onedb/{ => shared}/4.3.90_to_4.4.0.rb (100%) rename src/onedb/{ => shared}/4.4.0_to_4.4.1.rb (100%) rename src/onedb/{ => shared}/4.4.1_to_4.5.80.rb (100%) diff --git a/install.sh b/install.sh index 257e2e12d9..45c87eeeb3 100755 --- a/install.sh +++ b/install.sh @@ -227,6 +227,8 @@ LIB_DIRS="$LIB_LOCATION/ruby \ $LIB_LOCATION/ruby/cloud/marketplace \ $LIB_LOCATION/ruby/cloud/CloudAuth \ $LIB_LOCATION/ruby/onedb \ + $LIB_LOCATION/ruby/onedb/shared \ + $LIB_LOCATION/ruby/onedb/local \ $LIB_LOCATION/ruby/vendors \ $LIB_LOCATION/ruby/vendors/rbvmomi \ $LIB_LOCATION/ruby/vendors/rbvmomi/lib \ @@ 
-394,7 +396,9 @@ INSTALL_FILES=( MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes MAD_SH_LIB_FILES:$LIB_LOCATION/sh MAD_SH_LIB_FILES:$VAR_LOCATION/remotes - ONEDB_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb + ONEDB_FILES:$LIB_LOCATION/ruby/onedb + ONEDB_SHARED_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/shared + ONEDB_LOCAL_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/local MADS_LIB_FILES:$LIB_LOCATION/mads IM_PROBES_FILES:$VAR_LOCATION/remotes/im IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d @@ -1041,43 +1045,48 @@ DATASTORE_DRIVER_CEPH_SCRIPTS="src/datastore_mad/remotes/ceph/cp \ #------------------------------------------------------------------------------- # Migration scripts for onedb command, to be installed under $LIB_LOCATION #------------------------------------------------------------------------------- -ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \ - src/onedb/2.9.80_to_2.9.85.rb \ - src/onedb/2.9.85_to_2.9.90.rb \ - src/onedb/2.9.90_to_3.0.0.rb \ - src/onedb/3.0.0_to_3.1.0.rb \ - src/onedb/3.1.0_to_3.1.80.rb \ - src/onedb/3.1.80_to_3.2.0.rb \ - src/onedb/3.2.0_to_3.2.1.rb \ - src/onedb/3.2.1_to_3.3.0.rb \ - src/onedb/3.3.0_to_3.3.80.rb \ - src/onedb/3.3.80_to_3.4.0.rb \ - src/onedb/3.4.0_to_3.4.1.rb \ - src/onedb/3.4.1_to_3.5.80.rb \ - src/onedb/3.5.80_to_3.6.0.rb \ - src/onedb/3.6.0_to_3.7.80.rb \ - src/onedb/3.7.80_to_3.8.0.rb \ - src/onedb/3.8.0_to_3.8.1.rb \ - src/onedb/3.8.1_to_3.8.2.rb \ - src/onedb/3.8.2_to_3.8.3.rb \ - src/onedb/3.8.3_to_3.8.4.rb \ - src/onedb/3.8.4_to_3.8.5.rb \ - src/onedb/3.8.5_to_3.9.80.rb \ - src/onedb/3.9.80_to_3.9.90.rb \ - src/onedb/3.9.90_to_4.0.0.rb \ - src/onedb/4.0.0_to_4.0.1.rb \ - src/onedb/4.0.1_to_4.1.80.rb \ - src/onedb/4.1.80_to_4.2.0.rb \ - src/onedb/4.2.0_to_4.3.80.rb \ - src/onedb/4.3.80_to_4.3.85.rb \ - src/onedb/4.3.85_to_4.3.90.rb \ - src/onedb/4.3.90_to_4.4.0.rb \ - src/onedb/4.4.0_to_4.4.1.rb \ - src/onedb/4.4.1_to_4.5.80.rb \ - src/onedb/fsck.rb \ - src/onedb/import_slave.rb \ - src/onedb/onedb.rb \ - src/onedb/onedb_backend.rb" + + +ONEDB_FILES="src/onedb/fsck.rb \ + src/onedb/import_slave.rb \ + src/onedb/onedb.rb \ + src/onedb/onedb_backend.rb" + +ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \ + src/onedb/shared/2.9.80_to_2.9.85.rb \ + src/onedb/shared/2.9.85_to_2.9.90.rb \ + src/onedb/shared/2.9.90_to_3.0.0.rb \ + src/onedb/shared/3.0.0_to_3.1.0.rb \ + src/onedb/shared/3.1.0_to_3.1.80.rb \ + src/onedb/shared/3.1.80_to_3.2.0.rb \ + src/onedb/shared/3.2.0_to_3.2.1.rb \ + src/onedb/shared/3.2.1_to_3.3.0.rb \ + src/onedb/shared/3.3.0_to_3.3.80.rb \ + src/onedb/shared/3.3.80_to_3.4.0.rb \ + src/onedb/shared/3.4.0_to_3.4.1.rb \ + src/onedb/shared/3.4.1_to_3.5.80.rb \ + src/onedb/shared/3.5.80_to_3.6.0.rb \ + src/onedb/shared/3.6.0_to_3.7.80.rb \ + src/onedb/shared/3.7.80_to_3.8.0.rb \ + src/onedb/shared/3.8.0_to_3.8.1.rb \ + src/onedb/shared/3.8.1_to_3.8.2.rb \ + src/onedb/shared/3.8.2_to_3.8.3.rb \ + src/onedb/shared/3.8.3_to_3.8.4.rb \ + src/onedb/shared/3.8.4_to_3.8.5.rb \ + src/onedb/shared/3.8.5_to_3.9.80.rb \ + src/onedb/shared/3.9.80_to_3.9.90.rb \ + src/onedb/shared/3.9.90_to_4.0.0.rb \ + src/onedb/shared/4.0.0_to_4.0.1.rb \ + src/onedb/shared/4.0.1_to_4.1.80.rb \ + src/onedb/shared/4.1.80_to_4.2.0.rb \ + src/onedb/shared/4.2.0_to_4.3.80.rb \ + src/onedb/shared/4.3.80_to_4.3.85.rb \ + src/onedb/shared/4.3.85_to_4.3.90.rb \ + src/onedb/shared/4.3.90_to_4.4.0.rb \ + src/onedb/shared/4.4.0_to_4.4.1.rb \ + src/onedb/shared/4.4.1_to_4.5.80.rb" + +ONEDB_LOCAL_MIGRATOR_FILES="" 
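
ONEDB_LOCAL_MIGRATOR_FILES is left empty here; local migrators added later are
expected to follow the same convention as the shared ones, a Migrator module
exposing db_version, one_version and up, loaded by apply_migrators. A
hypothetical skeleton, for illustration only (no such file ships with this
patch and the version strings are placeholders):

    # src/onedb/local/4.5.80_to_<next>.rb  (hypothetical example)
    module Migrator
        def db_version
            "<next>"            # written to local_db_versioning by onedb
        end

        def one_version
            "OpenNebula <next>"
        end

        def up
            # SQL statements touching only local (non-federated) tables
            true
        end
    end
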
#------------------------------------------------------------------------------- # Configuration files for OpenNebula, to be installed under $ETC_LOCATION diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index b8e686dce5..6403c1d189 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -133,64 +133,52 @@ class OneDB # max_version is ignored for now, as this is the first onedb release. # May be used in next releases def upgrade(max_version, ops) - ret = @backend.read_db_version + db_version = @backend.read_db_version if ops[:verbose] - pretty_print_db_version(ret) + pretty_print_db_version(db_version) puts "" end - # TODO: different upgrade path for slave/master database tables - - matches = Dir.glob("#{RUBY_LIB_LOCATION}/onedb/#{ret[:version]}_to_*.rb") - - if ( matches.size > 0 ) - # At least one upgrade will be executed, make DB backup - backup(ops[:backup], ops) - end + backup(ops[:backup], ops) begin - result = nil - i = 0 - timea = Time.now - while ( matches.size > 0 ) - if ( matches.size > 1 ) - raise "There are more than one file that match \ - \"#{RUBY_LIB_LOCATION}/onedb/#{ret[:version]}_to_*.rb\"" + # Upgrade shared (federation) tables, only for standalone and master + if !db_version[:is_slave] + puts + puts ">>> Running migrators for shared tables" + + dir_prefix = "#{RUBY_LIB_LOCATION}/onedb/shared" + + result = apply_migrators(dir_prefix, db_version[:version], ops) + + # Modify db_versioning table + if result != nil + @backend.update_db_version(db_version[:version]) + else + puts "Database already uses version #{db_version[:version]}" end - - file = matches[0] - - puts " > Running migrator #{file}" if ops[:verbose] - - time0 = Time.now - - load(file) - @backend.extend Migrator - result = @backend.up - - time1 = Time.now - - if !result - raise "Error while upgrading from #{ret[:version]} to " << - " #{@backend.db_version}" - end - - puts " > Done in #{"%0.02f" % (time1 - time0).to_s}s" if ops[:verbose] - puts "" if ops[:verbose] - - matches = Dir.glob( - "#{RUBY_LIB_LOCATION}/onedb/#{@backend.db_version}_to_*.rb") end + db_version = @backend.read_db_version + + # Upgrade local tables, for standalone, master, and slave + + puts + puts ">>> Running migrators for local tables" + + dir_prefix = "#{RUBY_LIB_LOCATION}/onedb/local" + + result = apply_migrators(dir_prefix, db_version[:local_version], ops) + # Modify db_versioning table if result != nil - @backend.update_db_version(ret[:version]) + @backend.update_local_db_version(db_version[:local_version]) else - puts "Database already uses version #{ret[:version]}" + puts "Database already uses version #{db_version[:local_version]}" end timeb = Time.now @@ -213,6 +201,45 @@ class OneDB end end + def apply_migrators(prefix, db_version, ops) + result = nil + i = 0 + + matches = Dir.glob("#{prefix}/#{db_version}_to_*.rb") + + while ( matches.size > 0 ) + if ( matches.size > 1 ) + raise "There are more than one file that match \ + \"#{prefix}/#{db_version}_to_*.rb\"" + end + + file = matches[0] + + puts " > Running migrator #{file}" if ops[:verbose] + + time0 = Time.now + + load(file) + @backend.extend Migrator + result = @backend.up + + time1 = Time.now + + if !result + raise "Error while upgrading from #{db_version} to " << + " #{@backend.db_version}" + end + + puts " > Done in #{"%0.02f" % (time1 - time0).to_s}s" if ops[:verbose] + puts "" if ops[:verbose] + + matches = Dir.glob( + "#{prefix}/#{@backend.db_version}_to_*.rb") + end + + return result + end + def fsck(ops) ret = @backend.read_db_version diff --git 
a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index ca7b1a7519..fb0e85fb4e 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -42,14 +42,19 @@ class OneDBBacKEnd ret[:comment] = row[:comment] end + ret[:local_version] = ret[:version] + ret[:local_timestamp] = ret[:timestamp] + ret[:local_comment] = ret[:comment] + ret[:is_slave] = false + begin @db.fetch("SELECT version, timestamp, comment, is_slave FROM "+ "local_db_versioning WHERE oid=(SELECT MAX(oid) "+ "FROM local_db_versioning)") do |row| - ret[:local_version] = row[:version] - ret[:local_timestamp] = row[:timestamp] - ret[:local_comment] = row[:comment] - ret[:is_slave] = row[:is_slave] + ret[:local_version] = row[:version] + ret[:local_timestamp] = row[:timestamp] + ret[:local_comment] = row[:comment] + ret[:is_slave] = row[:is_slave] end rescue Exception => e if e.class == Sequel::DatabaseConnectionError @@ -127,6 +132,37 @@ class OneDBBacKEnd puts comment end + def update_local_db_version(version) + comment = "Database migrated from #{version} to #{db_version}"+ + " (#{one_version}) by onedb command." + + max_oid = nil + @db.fetch("SELECT MAX(oid) FROM local_db_versioning") do |row| + max_oid = row[:"MAX(oid)"].to_i + end + + max_oid = 0 if max_oid.nil? + + is_slave = 0 + + @db.fetch("SELECT is_slave FROM local_db_versioning "<< + "WHERE oid=#{max_oid}") do |row| + is_slave = row[:is_slave] + end + + @db.run( + "INSERT INTO local_db_versioning (oid, version, timestamp, comment, is_slave) "<< + "VALUES (" << + "#{max_oid+1}, " << + "'#{db_version}', " << + "#{Time.new.to_i}, " << + "'#{comment}'," << + "#{is_slave})" + ) + + puts comment + end + def db() return @db end diff --git a/src/onedb/2.0_to_2.9.80.rb b/src/onedb/shared/2.0_to_2.9.80.rb similarity index 100% rename from src/onedb/2.0_to_2.9.80.rb rename to src/onedb/shared/2.0_to_2.9.80.rb diff --git a/src/onedb/2.9.80_to_2.9.85.rb b/src/onedb/shared/2.9.80_to_2.9.85.rb similarity index 100% rename from src/onedb/2.9.80_to_2.9.85.rb rename to src/onedb/shared/2.9.80_to_2.9.85.rb diff --git a/src/onedb/2.9.85_to_2.9.90.rb b/src/onedb/shared/2.9.85_to_2.9.90.rb similarity index 100% rename from src/onedb/2.9.85_to_2.9.90.rb rename to src/onedb/shared/2.9.85_to_2.9.90.rb diff --git a/src/onedb/2.9.90_to_3.0.0.rb b/src/onedb/shared/2.9.90_to_3.0.0.rb similarity index 100% rename from src/onedb/2.9.90_to_3.0.0.rb rename to src/onedb/shared/2.9.90_to_3.0.0.rb diff --git a/src/onedb/3.0.0_to_3.1.0.rb b/src/onedb/shared/3.0.0_to_3.1.0.rb similarity index 100% rename from src/onedb/3.0.0_to_3.1.0.rb rename to src/onedb/shared/3.0.0_to_3.1.0.rb diff --git a/src/onedb/3.1.0_to_3.1.80.rb b/src/onedb/shared/3.1.0_to_3.1.80.rb similarity index 100% rename from src/onedb/3.1.0_to_3.1.80.rb rename to src/onedb/shared/3.1.0_to_3.1.80.rb diff --git a/src/onedb/3.1.80_to_3.2.0.rb b/src/onedb/shared/3.1.80_to_3.2.0.rb similarity index 100% rename from src/onedb/3.1.80_to_3.2.0.rb rename to src/onedb/shared/3.1.80_to_3.2.0.rb diff --git a/src/onedb/3.2.0_to_3.2.1.rb b/src/onedb/shared/3.2.0_to_3.2.1.rb similarity index 100% rename from src/onedb/3.2.0_to_3.2.1.rb rename to src/onedb/shared/3.2.0_to_3.2.1.rb diff --git a/src/onedb/3.2.1_to_3.3.0.rb b/src/onedb/shared/3.2.1_to_3.3.0.rb similarity index 100% rename from src/onedb/3.2.1_to_3.3.0.rb rename to src/onedb/shared/3.2.1_to_3.3.0.rb diff --git a/src/onedb/3.3.0_to_3.3.80.rb b/src/onedb/shared/3.3.0_to_3.3.80.rb similarity index 100% rename from src/onedb/3.3.0_to_3.3.80.rb rename to 
src/onedb/shared/3.3.0_to_3.3.80.rb diff --git a/src/onedb/3.3.80_to_3.4.0.rb b/src/onedb/shared/3.3.80_to_3.4.0.rb similarity index 100% rename from src/onedb/3.3.80_to_3.4.0.rb rename to src/onedb/shared/3.3.80_to_3.4.0.rb diff --git a/src/onedb/3.4.0_to_3.4.1.rb b/src/onedb/shared/3.4.0_to_3.4.1.rb similarity index 100% rename from src/onedb/3.4.0_to_3.4.1.rb rename to src/onedb/shared/3.4.0_to_3.4.1.rb diff --git a/src/onedb/3.4.1_to_3.5.80.rb b/src/onedb/shared/3.4.1_to_3.5.80.rb similarity index 100% rename from src/onedb/3.4.1_to_3.5.80.rb rename to src/onedb/shared/3.4.1_to_3.5.80.rb diff --git a/src/onedb/3.5.80_to_3.6.0.rb b/src/onedb/shared/3.5.80_to_3.6.0.rb similarity index 100% rename from src/onedb/3.5.80_to_3.6.0.rb rename to src/onedb/shared/3.5.80_to_3.6.0.rb diff --git a/src/onedb/3.6.0_to_3.7.80.rb b/src/onedb/shared/3.6.0_to_3.7.80.rb similarity index 100% rename from src/onedb/3.6.0_to_3.7.80.rb rename to src/onedb/shared/3.6.0_to_3.7.80.rb diff --git a/src/onedb/3.7.80_to_3.8.0.rb b/src/onedb/shared/3.7.80_to_3.8.0.rb similarity index 100% rename from src/onedb/3.7.80_to_3.8.0.rb rename to src/onedb/shared/3.7.80_to_3.8.0.rb diff --git a/src/onedb/3.8.0_to_3.8.1.rb b/src/onedb/shared/3.8.0_to_3.8.1.rb similarity index 100% rename from src/onedb/3.8.0_to_3.8.1.rb rename to src/onedb/shared/3.8.0_to_3.8.1.rb diff --git a/src/onedb/3.8.1_to_3.8.2.rb b/src/onedb/shared/3.8.1_to_3.8.2.rb similarity index 100% rename from src/onedb/3.8.1_to_3.8.2.rb rename to src/onedb/shared/3.8.1_to_3.8.2.rb diff --git a/src/onedb/3.8.2_to_3.8.3.rb b/src/onedb/shared/3.8.2_to_3.8.3.rb similarity index 100% rename from src/onedb/3.8.2_to_3.8.3.rb rename to src/onedb/shared/3.8.2_to_3.8.3.rb diff --git a/src/onedb/3.8.3_to_3.8.4.rb b/src/onedb/shared/3.8.3_to_3.8.4.rb similarity index 100% rename from src/onedb/3.8.3_to_3.8.4.rb rename to src/onedb/shared/3.8.3_to_3.8.4.rb diff --git a/src/onedb/3.8.4_to_3.8.5.rb b/src/onedb/shared/3.8.4_to_3.8.5.rb similarity index 100% rename from src/onedb/3.8.4_to_3.8.5.rb rename to src/onedb/shared/3.8.4_to_3.8.5.rb diff --git a/src/onedb/3.8.5_to_3.9.80.rb b/src/onedb/shared/3.8.5_to_3.9.80.rb similarity index 100% rename from src/onedb/3.8.5_to_3.9.80.rb rename to src/onedb/shared/3.8.5_to_3.9.80.rb diff --git a/src/onedb/3.9.80_to_3.9.90.rb b/src/onedb/shared/3.9.80_to_3.9.90.rb similarity index 100% rename from src/onedb/3.9.80_to_3.9.90.rb rename to src/onedb/shared/3.9.80_to_3.9.90.rb diff --git a/src/onedb/3.9.90_to_4.0.0.rb b/src/onedb/shared/3.9.90_to_4.0.0.rb similarity index 100% rename from src/onedb/3.9.90_to_4.0.0.rb rename to src/onedb/shared/3.9.90_to_4.0.0.rb diff --git a/src/onedb/4.0.0_to_4.0.1.rb b/src/onedb/shared/4.0.0_to_4.0.1.rb similarity index 100% rename from src/onedb/4.0.0_to_4.0.1.rb rename to src/onedb/shared/4.0.0_to_4.0.1.rb diff --git a/src/onedb/4.0.1_to_4.1.80.rb b/src/onedb/shared/4.0.1_to_4.1.80.rb similarity index 100% rename from src/onedb/4.0.1_to_4.1.80.rb rename to src/onedb/shared/4.0.1_to_4.1.80.rb diff --git a/src/onedb/4.1.80_to_4.2.0.rb b/src/onedb/shared/4.1.80_to_4.2.0.rb similarity index 100% rename from src/onedb/4.1.80_to_4.2.0.rb rename to src/onedb/shared/4.1.80_to_4.2.0.rb diff --git a/src/onedb/4.2.0_to_4.3.80.rb b/src/onedb/shared/4.2.0_to_4.3.80.rb similarity index 100% rename from src/onedb/4.2.0_to_4.3.80.rb rename to src/onedb/shared/4.2.0_to_4.3.80.rb diff --git a/src/onedb/4.3.80_to_4.3.85.rb b/src/onedb/shared/4.3.80_to_4.3.85.rb similarity index 100% rename from 
src/onedb/4.3.80_to_4.3.85.rb rename to src/onedb/shared/4.3.80_to_4.3.85.rb diff --git a/src/onedb/4.3.85_to_4.3.90.rb b/src/onedb/shared/4.3.85_to_4.3.90.rb similarity index 100% rename from src/onedb/4.3.85_to_4.3.90.rb rename to src/onedb/shared/4.3.85_to_4.3.90.rb diff --git a/src/onedb/4.3.90_to_4.4.0.rb b/src/onedb/shared/4.3.90_to_4.4.0.rb similarity index 100% rename from src/onedb/4.3.90_to_4.4.0.rb rename to src/onedb/shared/4.3.90_to_4.4.0.rb diff --git a/src/onedb/4.4.0_to_4.4.1.rb b/src/onedb/shared/4.4.0_to_4.4.1.rb similarity index 100% rename from src/onedb/4.4.0_to_4.4.1.rb rename to src/onedb/shared/4.4.0_to_4.4.1.rb diff --git a/src/onedb/4.4.1_to_4.5.80.rb b/src/onedb/shared/4.4.1_to_4.5.80.rb similarity index 100% rename from src/onedb/4.4.1_to_4.5.80.rb rename to src/onedb/shared/4.4.1_to_4.5.80.rb From 8df6d07a48311a756b4f9dc2ee89e76c86b0b3c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 11:34:43 +0100 Subject: [PATCH 67/80] Feature #2565: Fix bug in acl creation --- src/group/Group.cc | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/group/Group.cc b/src/group/Group.cc index afa9ab0df3..363e75a4eb 100644 --- a/src/group/Group.cc +++ b/src/group/Group.cc @@ -371,6 +371,11 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) error_msg); + if (rc < 0) + { + NebulaLog::log("GROUP",Log::ERROR,error_msg); + } + // @ DATASTORE+NET/% USE # rc += aclm->add_rule( AclRule::GROUP_ID | @@ -387,9 +392,9 @@ int Group::add_resource_provider(int zone_id, int cluster_id, string& error_msg) error_msg); - if (rc != 0) + if (rc < 0) { - return -1; + NebulaLog::log("GROUP",Log::ERROR,error_msg); } return 0; @@ -436,6 +441,11 @@ int Group::del_resource_provider(int zone_id, int cluster_id, string& error_msg) error_msg); + if (rc < 0) + { + NebulaLog::log("GROUP",Log::ERROR,error_msg); + } + // @ DATASTORE+NET/% USE # rc += aclm->del_rule( AclRule::GROUP_ID | @@ -452,9 +462,9 @@ int Group::del_resource_provider(int zone_id, int cluster_id, string& error_msg) error_msg); - if (rc != 0) + if (rc < 0) { - return -1; + NebulaLog::log("GROUP",Log::ERROR,error_msg); } return 0; From 7577d6d6b9e1c73b9086d64584ee305023c084c1 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Tue, 4 Mar 2014 11:42:44 +0100 Subject: [PATCH 68/80] feature #1798: Optionally reserve capacity from a host. Cluster defaults can be defined --- include/Cluster.h | 13 +++++++++++++ include/Host.h | 34 ++++++++++++++++++++++++++++++---- include/MonitorThread.h | 3 +++ src/host/Host.cc | 10 +++++++--- src/im/MonitorThread.cc | 29 ++++++++++++++++++++++++++--- 5 files changed, 79 insertions(+), 10 deletions(-) diff --git a/include/Cluster.h b/include/Cluster.h index 3513a56819..0f4ad2261b 100644 --- a/include/Cluster.h +++ b/include/Cluster.h @@ -170,6 +170,19 @@ public: return vnets.get_collection_copy(); } + /** + * Get the default reserved capacity for hosts in the cluster. It can be + * overridden if defined in the host template. 
+ * @param cpu reserved cpu (in percentage) + * @param mem reserved mem (in KB) + */ + void get_reserved_capacity(long long &cpu, long long& mem) + { + get_template_attribute("RESERVED_CPU", cpu); + + get_template_attribute("RESERVED_MEM", mem); + } + // ************************************************************************* // DataBase implementation (Public) // ************************************************************************* diff --git a/include/Host.h b/include/Host.h index 35261e3f65..08bb75a6da 100644 --- a/include/Host.h +++ b/include/Host.h @@ -157,13 +157,17 @@ public: * @param with_vm_info if monitoring contains VM information * @param lost set of VMs that should be in the host and were not found * @param found VMs running in the host (as expected) and info. + * @param reserved_cpu from cluster defaults + * @param reserved_mem from cluster defaults * @return 0 on success **/ int update_info(Template &tmpl, bool &with_vm_info, set &lost, map &found, - const set &non_shared_ds); + const set &non_shared_ds, + long long reserved_cpu, + long long reserved_mem); /** * Extracts the DS attributes from the given template * @param parse_str string with values to be parsed @@ -268,11 +272,33 @@ public: return last_monitored; }; - // ------------------------------------------------------------------------ + /** + * Get the reserved capacity for this host. Parameters will be only updated + * if values are defined in the host. Reserved capacity will be subtracted + * from the Host total capacity. + * @param cpu reserved cpu (in percentage) + * @param mem reserved mem (in KB) + */ + void get_reserved_capacity(long long &cpu, long long& mem) + { + long long tcpu; + long long tmem; + + if (get_template_attribute("RESERVED_CPU", tcpu)) + { + cpu = tcpu; + } + + if (get_template_attribute("RESERVED_MEM", tmem)) + { + mem = tmem; + } + } + + // ------------------------------------------------------------------------- // Share functions. 
Returns the value associated with each host share // metric - // ------------------------------------------------------------------------ - + // ------------------------------------------------------------------------- long long get_share_running_vms() { return host_share.running_vms; diff --git a/include/MonitorThread.h b/include/MonitorThread.h index c8911d87f7..00d3145963 100644 --- a/include/MonitorThread.h +++ b/include/MonitorThread.h @@ -22,6 +22,7 @@ #include class HostPool; +class ClusterPool; class DatastorePool; class LifeCycleManager; @@ -53,6 +54,8 @@ private: // Pointers shared by all the MonitorThreads, init by MonitorThreadPool static HostPool * hpool; + static ClusterPool *cpool; + static DatastorePool * dspool; static LifeCycleManager *lcm; diff --git a/src/host/Host.cc b/src/host/Host.cc index 42a47b97db..8676c57b3e 100644 --- a/src/host/Host.cc +++ b/src/host/Host.cc @@ -238,7 +238,9 @@ int Host::update_info(Template &tmpl, bool &with_vm_info, set &lost, map &found, - const set &non_shared_ds) + const set &non_shared_ds, + long long reserved_cpu, + long long reserved_mem) { VectorAttribute* vatt; vector::iterator it; @@ -289,10 +291,12 @@ int Host::update_info(Template &tmpl, if (isEnabled()) { + get_reserved_capacity(reserved_cpu, reserved_mem); + erase_template_attribute("TOTALCPU", val); - host_share.max_cpu = val; + host_share.max_cpu = val - reserved_cpu; erase_template_attribute("TOTALMEMORY", val); - host_share.max_mem = val; + host_share.max_mem = val - reserved_mem; erase_template_attribute("DS_LOCATION_TOTAL_MB", val); host_share.max_disk = val; diff --git a/src/im/MonitorThread.cc b/src/im/MonitorThread.cc index bfd4f561bb..921133fcbf 100644 --- a/src/im/MonitorThread.cc +++ b/src/im/MonitorThread.cc @@ -36,6 +36,8 @@ LifeCycleManager * MonitorThread::lcm; MonitorThreadPool * MonitorThread::mthpool; +ClusterPool * MonitorThread::cpool; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -94,7 +96,7 @@ void MonitorThread::do_message() } // ------------------------------------------------------------------------- - // Get DS Information from Moniroting Information + // Get DS Information from Moniroting Information & Reserved Capacity // ------------------------------------------------------------------------- map datastores; map::iterator itm; @@ -104,7 +106,13 @@ void MonitorThread::do_message() set non_shared_ds; - int rc = host->extract_ds_info(*hinfo, tmpl, datastores); + int rc = host->extract_ds_info(*hinfo, tmpl, datastores); + + int cid = host->get_cluster_id(); + + long long reserved_cpu = 0; + + long long reserved_mem = 0; delete hinfo; @@ -115,6 +123,18 @@ void MonitorThread::do_message() return; } + if (cid != -1) + { + Cluster *cluster = cpool->get(cid, true); + + if (cluster != 0) + { + cluster->get_reserved_capacity(reserved_cpu, reserved_mem); + + cluster->unlock(); + } + } + for (itm = datastores.begin(); itm != datastores.end(); itm++) { ds = dspool->get(itm->first, true); @@ -170,7 +190,8 @@ void MonitorThread::do_message() return; } - rc = host->update_info(tmpl, vm_poll, lost, found, non_shared_ds); + rc = host->update_info(tmpl, vm_poll, lost, found, non_shared_ds, + reserved_cpu, reserved_mem); hpool->update(host); @@ -220,6 +241,8 @@ MonitorThreadPool::MonitorThreadPool(int max_thr):concurrent_threads(max_thr), MonitorThread::lcm = Nebula::instance().get_lcm(); + MonitorThread::cpool = Nebula::instance().get_clpool(); + 
MonitorThread::mthpool= this; //Initialize concurrency variables From 050c445cc3b03c556c19c164adceb16818ddaff3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 11:46:51 +0100 Subject: [PATCH 69/80] Set same defaults for onezone list in code and yaml file --- src/cli/one_helper/onezone_helper.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cli/one_helper/onezone_helper.rb b/src/cli/one_helper/onezone_helper.rb index f854201794..e393355756 100644 --- a/src/cli/one_helper/onezone_helper.rb +++ b/src/cli/one_helper/onezone_helper.rb @@ -43,11 +43,11 @@ class OneZoneHelper < OpenNebulaHelper::OneHelper d["NAME"] end - column :ENDPOINT, "Endpoint of the Zone", :left, :size=>50 do |d| + column :ENDPOINT, "Endpoint of the Zone", :left, :size=>45 do |d| d["TEMPLATE"]['ENDPOINT'] end - default :ID, :NAME, :ENDPOINT + default :CURRENT, :ID, :NAME, :ENDPOINT end table From f8977e26c3c64ee88b3c647ac1f8c49917ae2f00 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Tue, 4 Mar 2014 12:02:08 +0100 Subject: [PATCH 70/80] feature #1798: Get rid of HYPERVISOR_MEM as resources can be now limited host or cluster-wise --- src/scheduler/etc/sched.conf | 5 ----- src/scheduler/include/HostPoolXML.h | 5 +---- src/scheduler/include/HostXML.h | 11 ----------- src/scheduler/include/Scheduler.h | 6 ------ src/scheduler/src/pool/HostXML.cc | 7 ------- src/scheduler/src/sched/Scheduler.cc | 4 +--- src/scheduler/src/sched/SchedulerTemplate.cc | 7 ------- 7 files changed, 2 insertions(+), 43 deletions(-) diff --git a/src/scheduler/etc/sched.conf b/src/scheduler/etc/sched.conf index 05e5897b37..0156ca8bbb 100644 --- a/src/scheduler/etc/sched.conf +++ b/src/scheduler/etc/sched.conf @@ -22,9 +22,6 @@ # # LIVE_RESCHEDS: Perform live (1) or cold migrations (0) when rescheduling a VM # -# HYPERVISOR_MEM: Fraction of total MEMORY reserved for the hypervisor. -# E.g. 0.1 means that only 90% of the total MEMORY will be used -# # DEFAULT_SCHED: Definition of the default scheduling algorithm # - policy: # 0 = Packing. Heuristic that minimizes the number of hosts in use by @@ -71,8 +68,6 @@ MAX_HOST = 1 LIVE_RESCHEDS = 0 -HYPERVISOR_MEM = 0.1 - DEFAULT_SCHED = [ policy = 1 ] diff --git a/src/scheduler/include/HostPoolXML.h b/src/scheduler/include/HostPoolXML.h index d2378b1a9e..49a8e3dd86 100644 --- a/src/scheduler/include/HostPoolXML.h +++ b/src/scheduler/include/HostPoolXML.h @@ -28,10 +28,7 @@ class HostPoolXML : public PoolXML { public: - HostPoolXML(Client* client, float mem):PoolXML(client) - { - HostXML::set_hypervisor_mem(mem); - }; + HostPoolXML(Client* client):PoolXML(client) {}; ~HostPoolXML(){}; diff --git a/src/scheduler/include/HostXML.h b/src/scheduler/include/HostXML.h index 3aabe433f4..c9f5bc0dd2 100644 --- a/src/scheduler/include/HostXML.h +++ b/src/scheduler/include/HostXML.h @@ -113,15 +113,6 @@ public: */ int search(const char *name, int& value); - /** - * Sets the memory fraction reserved for the hypervisor. This function - * should be called before using the host pool. 
- */ - static void set_hypervisor_mem(float mem) - { - hypervisor_mem = 1.0 - mem; - }; - /** * Checks if the host is a remote public cloud * @return true if the host is a remote public cloud @@ -151,8 +142,6 @@ private: bool public_cloud; // Configuration attributes - static float hypervisor_mem; /**< Fraction of memory for the VMs */ - static const char *host_paths[]; /**< paths for search function */ static int host_num_paths; /**< number of paths*/ diff --git a/src/scheduler/include/Scheduler.h b/src/scheduler/include/Scheduler.h index 1a659f28d0..f05131c863 100644 --- a/src/scheduler/include/Scheduler.h +++ b/src/scheduler/include/Scheduler.h @@ -60,7 +60,6 @@ protected: machines_limit(0), dispatch_limit(0), host_dispatch_limit(0), - hypervisor_mem(0), client(0) { am.addListener(this); @@ -171,11 +170,6 @@ private: */ unsigned int host_dispatch_limit; - /** - * Memory reserved for the hypervisor - */ - float hypervisor_mem; - /** * OpenNebula zone id. */ diff --git a/src/scheduler/src/pool/HostXML.cc b/src/scheduler/src/pool/HostXML.cc index 748a2617fb..5fc38f6941 100644 --- a/src/scheduler/src/pool/HostXML.cc +++ b/src/scheduler/src/pool/HostXML.cc @@ -22,9 +22,6 @@ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ - -float HostXML::hypervisor_mem; - int HostXML::host_num_paths = 4; const char *HostXML::host_paths[] = { @@ -51,10 +48,6 @@ void HostXML::init_attributes() running_vms = atoll(((*this)["/HOST/HOST_SHARE/RUNNING_VMS"])[0].c_str()); - //Reserve memory for the hypervisor - max_mem = static_cast(hypervisor_mem * static_cast(max_mem)); - - public_cloud = false; vector public_cloud_vector = (*this)["/HOST/TEMPLATE/PUBLIC_CLOUD"]; diff --git a/src/scheduler/src/sched/Scheduler.cc b/src/scheduler/src/sched/Scheduler.cc index 608231dcab..04dea96140 100644 --- a/src/scheduler/src/sched/Scheduler.cc +++ b/src/scheduler/src/sched/Scheduler.cc @@ -122,8 +122,6 @@ void Scheduler::start() conf.get("LIVE_RESCHEDS", live_rescheds); - conf.get("HYPERVISOR_MEM", hypervisor_mem); - // ----------------------------------------------------------- // Log system & Configuration File // ----------------------------------------------------------- @@ -285,7 +283,7 @@ void Scheduler::start() // Pools // ------------------------------------------------------------------------- - hpool = new HostPoolXML(client, hypervisor_mem); + hpool = new HostPoolXML(client); clpool = new ClusterPoolXML(client); vmpool = new VirtualMachinePoolXML(client,machines_limit,(live_rescheds==1)); diff --git a/src/scheduler/src/sched/SchedulerTemplate.cc b/src/scheduler/src/sched/SchedulerTemplate.cc index e4d32e91e8..c1e9fa9a7b 100644 --- a/src/scheduler/src/sched/SchedulerTemplate.cc +++ b/src/scheduler/src/sched/SchedulerTemplate.cc @@ -44,7 +44,6 @@ void SchedulerTemplate::set_conf_default() # DEFAULT_SCHED # DEFAULT_DS_SCHED # LIVE_RESCHEDS -# HYPERVISOR_MEM # LOG #------------------------------------------------------------------------------- */ @@ -104,12 +103,6 @@ void SchedulerTemplate::set_conf_default() vattribute = new VectorAttribute("DEFAULT_DS_SCHED",vvalue); conf_default.insert(make_pair(vattribute->name(),vattribute)); - //HYPERVISOR_MEM - value = "0.1"; - - attribute = new SingleAttribute("HYPERVISOR_MEM",value); - conf_default.insert(make_pair(attribute->name(),attribute)); - //LOG CONFIGURATION vvalue.clear(); vvalue.insert(make_pair("SYSTEM","file")); From 
68a7909348176c5d1f2b1a3c6d563c0facbbf209 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 4 Mar 2014 12:49:42 +0100 Subject: [PATCH 71/80] Change Nebula.h version for bump version script --- include/Nebula.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/Nebula.h b/include/Nebula.h index eb34014bd5..ae53bd5103 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -363,7 +363,7 @@ public: */ static string version() { - return "OpenNebula 4.5.0"; + return "OpenNebula " + code_version(); }; /** @@ -372,7 +372,7 @@ public: */ static string code_version() { - return "4.5.0"; + return "4.5.0"; // bump version } /** From 03f577bbeb5cf1a8ea98e95038bdbdd87beaae13 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Tue, 4 Mar 2014 12:53:44 +0100 Subject: [PATCH 72/80] Bump version --- include/Nebula.h | 2 +- include/Zone.h | 2 +- include/ZonePool.h | 2 +- share/rubygems/generate | 2 +- share/scripts/context-packages/generate.sh | 2 +- src/cli/one_helper/onezone_helper.rb | 2 +- src/cli/onezone | 2 +- src/cloud/common/CloudClient.rb | 2 +- src/im_mad/remotes/VERSION | 2 +- src/oca/java/src/org/opennebula/client/OneSystem.java | 2 +- src/oca/ruby/opennebula.rb | 2 +- src/oca/ruby/opennebula/zone.rb | 2 +- src/oca/ruby/opennebula/zone_pool.rb | 2 +- src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb | 2 +- src/sunstone/public/js/plugins/zones-tab.js | 2 +- src/sunstone/views/index.erb | 2 +- src/sunstone/views/login.erb | 2 +- src/um/QuotasSQL.cc | 2 +- src/zone/SConstruct | 2 +- src/zone/Zone.cc | 2 +- src/zone/ZonePool.cc | 2 +- 21 files changed, 21 insertions(+), 21 deletions(-) diff --git a/include/Nebula.h b/include/Nebula.h index ae53bd5103..130b836de2 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -372,7 +372,7 @@ public: */ static string code_version() { - return "4.5.0"; // bump version + return "4.5.80"; // bump version } /** diff --git a/include/Zone.h b/include/Zone.h index 85a35b9442..5b39a011e0 100644 --- a/include/Zone.h +++ b/include/Zone.h @@ -1,5 +1,5 @@ /* ------------------------------------------------------------------------ */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ diff --git a/include/ZonePool.h b/include/ZonePool.h index caa0e8de3c..9d9c811437 100644 --- a/include/ZonePool.h +++ b/include/ZonePool.h @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------------- */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. 
You may obtain */ diff --git a/share/rubygems/generate b/share/rubygems/generate index af1520fb93..69de4fabdb 100755 --- a/share/rubygems/generate +++ b/share/rubygems/generate @@ -21,7 +21,7 @@ require 'tmpdir' DEFAULTS={ - :version => "4.5.0", + :version => "4.5.80", :date => Time.now.strftime("%Y-%m-%d"), :dependencies => [] } diff --git a/share/scripts/context-packages/generate.sh b/share/scripts/context-packages/generate.sh index a114c397bb..703b302ad2 100755 --- a/share/scripts/context-packages/generate.sh +++ b/share/scripts/context-packages/generate.sh @@ -16,7 +16,7 @@ # limitations under the License. # #--------------------------------------------------------------------------- # -VERSION=${VERSION:-4.5.0} +VERSION=${VERSION:-4.5.80} MAINTAINER=${MAINTAINER:-C12G Labs } LICENSE=${LICENSE:-Apache 2.0} PACKAGE_NAME=${PACKAGE_NAME:-one-context} diff --git a/src/cli/one_helper/onezone_helper.rb b/src/cli/one_helper/onezone_helper.rb index e393355756..8a8fe39da2 100644 --- a/src/cli/one_helper/onezone_helper.rb +++ b/src/cli/one_helper/onezone_helper.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # diff --git a/src/cli/onezone b/src/cli/onezone index 2b196d0cb7..24b1ec5c46 100755 --- a/src/cli/onezone +++ b/src/cli/onezone @@ -1,7 +1,7 @@ #!/usr/bin/env ruby # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. 
You may obtain # diff --git a/src/cloud/common/CloudClient.rb b/src/cloud/common/CloudClient.rb index 555e6a308e..f915681438 100644 --- a/src/cloud/common/CloudClient.rb +++ b/src/cloud/common/CloudClient.rb @@ -50,7 +50,7 @@ end module CloudClient # OpenNebula version - VERSION = '4.5.0' + VERSION = '4.5.80' # ######################################################################### # Default location for the authentication file diff --git a/src/im_mad/remotes/VERSION b/src/im_mad/remotes/VERSION index ae153944ee..a6517be9b3 100644 --- a/src/im_mad/remotes/VERSION +++ b/src/im_mad/remotes/VERSION @@ -1 +1 @@ -4.5.0 \ No newline at end of file +4.5.80 \ No newline at end of file diff --git a/src/oca/java/src/org/opennebula/client/OneSystem.java b/src/oca/java/src/org/opennebula/client/OneSystem.java index a6413fa309..173903d34a 100644 --- a/src/oca/java/src/org/opennebula/client/OneSystem.java +++ b/src/oca/java/src/org/opennebula/client/OneSystem.java @@ -32,7 +32,7 @@ public class OneSystem private static final String GROUP_QUOTA_INFO = "groupquota.info"; private static final String GROUP_QUOTA_UPDATE = "groupquota.update"; - public static final String VERSION = "4.5.0"; + public static final String VERSION = "4.5.80"; public OneSystem(Client client) { diff --git a/src/oca/ruby/opennebula.rb b/src/oca/ruby/opennebula.rb index bb23d3163a..0639799180 100644 --- a/src/oca/ruby/opennebula.rb +++ b/src/oca/ruby/opennebula.rb @@ -56,5 +56,5 @@ require 'opennebula/system' module OpenNebula # OpenNebula version - VERSION = '4.5.0' + VERSION = '4.5.80' end diff --git a/src/oca/ruby/opennebula/zone.rb b/src/oca/ruby/opennebula/zone.rb index 2e371aedd0..483265304d 100644 --- a/src/oca/ruby/opennebula/zone.rb +++ b/src/oca/ruby/opennebula/zone.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # diff --git a/src/oca/ruby/opennebula/zone_pool.rb b/src/oca/ruby/opennebula/zone_pool.rb index dc929ce04c..d29174a35d 100644 --- a/src/oca/ruby/opennebula/zone_pool.rb +++ b/src/oca/ruby/opennebula/zone_pool.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # diff --git a/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb b/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb index 181dd3d08c..6e26ed004d 100644 --- a/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb @@ -1,5 +1,5 @@ # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. 
You may obtain # diff --git a/src/sunstone/public/js/plugins/zones-tab.js b/src/sunstone/public/js/plugins/zones-tab.js index 12c9927fc2..50df67ad23 100644 --- a/src/sunstone/public/js/plugins/zones-tab.js +++ b/src/sunstone/public/js/plugins/zones-tab.js @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------------- */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ diff --git a/src/sunstone/views/index.erb b/src/sunstone/views/index.erb index 3991beeb67..f39cf7ef8e 100644 --- a/src/sunstone/views/index.erb +++ b/src/sunstone/views/index.erb @@ -88,7 +88,7 @@
diff --git a/src/sunstone/views/login.erb b/src/sunstone/views/login.erb index 2f1921db7d..b04a3720df 100644 --- a/src/sunstone/views/login.erb +++ b/src/sunstone/views/login.erb @@ -26,7 +26,7 @@ diff --git a/src/um/QuotasSQL.cc b/src/um/QuotasSQL.cc index b1feaa1975..2e9f950d69 100644 --- a/src/um/QuotasSQL.cc +++ b/src/um/QuotasSQL.cc @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------------- */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ diff --git a/src/zone/SConstruct b/src/zone/SConstruct index d646c1953e..c5f2fcfd56 100644 --- a/src/zone/SConstruct +++ b/src/zone/SConstruct @@ -1,7 +1,7 @@ # SConstruct for src/zone # -------------------------------------------------------------------------- # -# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs # +# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # diff --git a/src/zone/Zone.cc b/src/zone/Zone.cc index 2796294b09..82aea0b5ee 100644 --- a/src/zone/Zone.cc +++ b/src/zone/Zone.cc @@ -1,5 +1,5 @@ /* ------------------------------------------------------------------------ */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ diff --git a/src/zone/ZonePool.cc b/src/zone/ZonePool.cc index d9aa6f504d..aa001448ca 100644 --- a/src/zone/ZonePool.cc +++ b/src/zone/ZonePool.cc @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------------- */ -/* Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs */ +/* Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. 
You may obtain */ From cee9188aaac3b12daa2c36f326a45c44adede62b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 12:00:15 +0100 Subject: [PATCH 73/80] Bug #2735: Fix bug in import-slave --- src/onedb/import_slave.rb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/onedb/import_slave.rb b/src/onedb/import_slave.rb index 5f01d80fea..c22c1fcc45 100644 --- a/src/onedb/import_slave.rb +++ b/src/onedb/import_slave.rb @@ -558,12 +558,9 @@ EOT end ######################################################################## - # Init slave_db_versioning table + # Cleanup shared tables form slave DB ######################################################################## - @slave_db.run "CREATE TABLE local_db_versioning (oid INTEGER PRIMARY KEY, version VARCHAR(256), timestamp INTEGER, comment VARCHAR(256), is_slave BOOLEAN);" - @slave_db.run "INSERT INTO local_db_versioning VALUES(0,'#{LOCAL_VERSION}',#{Time.now.to_i},'onedb import tool',1);" - @slave_db.run "DROP TABLE old_document_pool;" @slave_db.run "DROP TABLE old_image_pool;" @slave_db.run "DROP TABLE old_network_pool;" From fce451fc72fd57756a8294780c6c1c14e7209551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 12:05:41 +0100 Subject: [PATCH 74/80] Feature #2763: Add group/template to onedb import-slave --- src/onedb/import_slave.rb | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/onedb/import_slave.rb b/src/onedb/import_slave.rb index c22c1fcc45..1f7d3ab535 100644 --- a/src/onedb/import_slave.rb +++ b/src/onedb/import_slave.rb @@ -387,6 +387,20 @@ EOT end end + slave_template = slave_doc.root.at_xpath("TEMPLATE") + master_template = master_doc.root.at_xpath("TEMPLATE") + + # Avoid duplicated template attributes, removing + # them from the slave template + master_template.children.each do |e| + if slave_template.at_xpath(e.name) + slave_template.at_xpath(e.name).remove + end + end + + # Add slave template attributes to master template + master_template << slave_template.children + @db[:group_pool].where(:oid => new_group[:oid]).update( :body => master_doc.root.to_s) else From d2b97922401a0a2a10341770a4dd8939ed74b710 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 15:32:03 +0100 Subject: [PATCH 75/80] Bug #2735: Translate zone id in resource providers and acl rules --- src/onedb/import_slave.rb | 88 ++++++++++++++++++++++++++++----------- 1 file changed, 63 insertions(+), 25 deletions(-) diff --git a/src/onedb/import_slave.rb b/src/onedb/import_slave.rb index 1f7d3ab535..ad21d66920 100644 --- a/src/onedb/import_slave.rb +++ b/src/onedb/import_slave.rb @@ -14,8 +14,6 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # -ONE_LOCATION = ENV["ONE_LOCATION"] - if !ONE_LOCATION LOG_LOCATION = "/var/log/one" else @@ -401,6 +399,14 @@ EOT # Add slave template attributes to master template master_template << slave_template.children + # Merge resource providers + slave_doc.root.xpath("RESOURCE_PROVIDER").each do |elem| + # Zone ID must be 0, will be changed to the target ID + elem.at_xpath("ZONE_ID").content = zone_id + + master_doc.root << elem + end + @db[:group_pool].where(:oid => new_group[:oid]).update( :body => master_doc.root.to_s) else @@ -419,6 +425,12 @@ EOT slave_doc.root.add_child(new_elem) + # Update resource providers + slave_doc.root.xpath("RESOURCE_PROVIDER").each do |elem| + # Zone ID must be 0, will be changed to the target ID + elem.at_xpath("ZONE_ID").content = zone_id + end + @db[:group_pool].insert( :oid => new_group[:oid], :name => new_group[:name], @@ -516,6 +528,19 @@ EOT ((row[:resource] & 0xFFFFFFFF00000000) | groups[gid][:oid]) end + elsif ( (row[:resource] & Acl::RESOURCES["GROUP"]) == Acl::RESOURCES["GROUP"] && + (row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) + + gid = (row[:resource] & 0xFFFFFFFF) + + if (groups[gid].nil?) + insert = false + error_str = "Group ##{gid} does not exist" + else + new_resource = + ((row[:resource] & 0xFFFFFFFF00000000) | groups[gid][:oid]) + end + elsif ( (row[:resource] & Acl::RESOURCES["USER"]) == Acl::RESOURCES["USER"] && (row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) @@ -531,6 +556,19 @@ EOT end + if ( (row[:resource] & Acl::RESOURCES["ZONE"]) == Acl::RESOURCES["ZONE"] && + (row[:resource] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) + + zid = (row[:resource] & 0xFFFFFFFF) + + if (zid != 0) + insert = false + error_str = "Zone ##{zid} is unknown for the slave" + else + new_resource = (Acl::USERS["UID"] | zone_id) + end + end + if ( (row[:zone] & Acl::USERS["UID"]) == Acl::USERS["UID"] ) zid = (row[:zone] & 0xFFFFFFFF) @@ -542,31 +580,31 @@ EOT end end - if (!insert) + # Avoid duplicated ACL rules + @db.fetch("SELECT oid FROM acl WHERE "<< + "user = #{new_user} AND resource = #{new_resource} "<< + "AND rights = #{row[:rights]} AND "<< + "zone = #{new_zone}") do |acl_row| + + insert = false + error_str = "the same Rule exists with ID ##{acl_row[:oid]}" + end + + + if (insert) + last_acl_oid += 1 + + log("Slave DB ACL Rule ##{row[:oid]} imported with ID ##{last_acl_oid}") + + @db[:acl].insert( + :oid => last_acl_oid, + :user => new_user, + :resource => new_resource, + :rights => row[:rights], + :zone => new_zone) + else log("Slave DB ACL Rule ##{row[:oid]} will not be "<< "imported to the master DB, " << error_str) - else - # Avoid duplicated ACL rules - @db.fetch("SELECT oid FROM acl WHERE "<< - "user = #{new_user} AND resource = #{new_resource} "<< - "AND rights = #{row[:rights]} AND "<< - "zone = #{row[:zone]}") do |acl_row| - - insert = false - end - - if (insert) - last_acl_oid += 1 - - log("New ACL Rule imported with ID ##{last_acl_oid}") - - @db[:acl].insert( - :oid => last_acl_oid, - :user => new_user, - :resource => new_resource, - :rights => row[:rights], - :zone => new_zone) - end end end end From 91bb93157baef6fa57a73bf9f77a626f484dd950 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 15:44:15 +0100 Subject: [PATCH 76/80] Bug #2712: Fix shell characters for mysql commands in onedb. Thanks to Nicolas Belan for the patch. 
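For illustration only (the password value below is made up and the command is simplified from the one in onedb_backend.rb), a short Ruby sketch of why the unquoted interpolation misbehaves when the password contains shell metacharacters:

    # Hypothetical password containing a shell metacharacter
    passwd = "s3cret&pw"

    unquoted = "mysqldump -u oneadmin -p#{passwd} opennebula > backup.sql"
    quoted   = "mysqldump -u oneadmin -p'#{passwd}' opennebula > backup.sql"

    # system(unquoted): the shell splits the command at '&', so mysqldump only
    # sees "s3cret" as the password and the dump target/redirection is lost.
    # system(quoted): the whole value reaches mysqldump as one literal word.

Note that single quoting still assumes the password itself contains no single quote; such passwords would need additional escaping.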
--- src/onedb/onedb_backend.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index fb0e85fb4e..653cfa4e14 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -233,7 +233,7 @@ class BackEndMySQL < OneDBBacKEnd end def backup(bck_file) - cmd = "mysqldump -u #{@user} -p#{@passwd} -h #{@server} " + + cmd = "mysqldump -u #{@user} -p'#{@passwd}' -h #{@server} " + "-P #{@port} #{@db_name} > #{bck_file}" rc = system(cmd) @@ -255,7 +255,7 @@ class BackEndMySQL < OneDBBacKEnd " use -f to overwrite." end - mysql_cmd = "mysql -u #{@user} -p#{@passwd} -h #{@server} -P #{@port} " + mysql_cmd = "mysql -u #{@user} -p'#{@passwd}' -h #{@server} -P #{@port} " drop_cmd = mysql_cmd + "-e 'DROP DATABASE IF EXISTS #{@db_name};'" rc = system(drop_cmd) From 0a3ba8aca91622feaf6f690fa1ebe266ef653bc7 Mon Sep 17 00:00:00 2001 From: JensHoffmann Date: Wed, 11 Dec 2013 22:40:01 +0100 Subject: [PATCH 77/80] Bug #2503: Improve setting of password/auth_driver UserChangePassword followed this strategy: a1. Eventually encrypt user password if the users auth_driver is CORE_AUTH a2. Set (probably encrypted) password with User::set_password a3. User::set_password tries to validate (probably encrypted) password instead of the raw password UserChangeAuth did something similar: b1. If password is given (not empty) do a1 through a3 b2. Set auth_driver The change proposes the following: * In set_password: 1. Validate the raw password 2. Do encryption if needed * In UserChangePassword: simply call set_password * In UserChangeAuth: set auth_driver *before* calling set_password, such that set_password does the right thing if the auth_driver changes Note: I needed to move the implementation of set_password from User.h to User.cc since it seems impossible to access UserPool::CORE_AUTH from within User.h. 
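To make the intended ordering concrete, a small illustrative sketch (plain Ruby, not the actual C++ code; the helper name is made up) of validating the raw password first and hashing it only for the core driver, which is why the auth driver has to be set before the password:

    require 'digest/sha1'

    # Validate the raw password, then store it hashed only for the core driver
    def store_password(raw_pass, auth_driver)
      raise ArgumentError, 'invalid password' if raw_pass.strip.empty?

      auth_driver == 'core' ? Digest::SHA1.hexdigest(raw_pass) : raw_pass
    end

    store_password('opennebula', 'core')   # stored as a SHA1 digest
    store_password('opennebula', 'ldap')   # stored verbatim for the external driver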
--- include/User.h | 18 ++---------------- src/rm/RequestManagerUser.cc | 21 +++------------------ src/um/User.cc | 26 ++++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 34 deletions(-) diff --git a/include/User.h b/include/User.h index 58d170f3a5..38c105d421 100644 --- a/include/User.h +++ b/include/User.h @@ -21,6 +21,7 @@ #include "UserTemplate.h" #include "ObjectCollection.h" #include "QuotasSQL.h" +#include "NebulaUtil.h" class UserQuotas; @@ -112,22 +113,7 @@ public: * @param error_str Returns the error reason, if any * @returns -1 if the password is not valid */ - int set_password(const string& passwd, string& error_str) - { - int rc = 0; - - if (pass_is_valid(passwd, error_str)) - { - password = passwd; - invalidate_session(); - } - else - { - rc = -1; - } - - return rc; - }; + int set_password(const string& passwd, string& error_str); /** * Returns user password diff --git a/src/rm/RequestManagerUser.cc b/src/rm/RequestManagerUser.cc index 03ab3f6316..e6ffa49635 100644 --- a/src/rm/RequestManagerUser.cc +++ b/src/rm/RequestManagerUser.cc @@ -15,7 +15,6 @@ /* -------------------------------------------------------------------------- */ #include "RequestManagerUser.h" -#include "NebulaUtil.h" using namespace std; @@ -70,11 +69,6 @@ int UserChangePassword::user_action(int user_id, return -1; } - if (user->get_auth_driver() == UserPool::CORE_AUTH) - { - new_pass = one_util::sha1_digest(new_pass); - } - int rc = user->set_password(new_pass, error_str); if ( rc == 0 ) @@ -125,22 +119,13 @@ int UserChangeAuth::user_action(int user_id, return -1; } - if ( !new_pass.empty() ) - { - if ( new_auth == UserPool::CORE_AUTH) - { - new_pass = one_util::sha1_digest(new_pass); - } + rc = user->set_auth_driver(new_auth, error_str); - // The password may be invalid, try to change it first + if ( rc == 0 && !new_pass.empty() ) + { rc = user->set_password(new_pass, error_str); } - if ( rc == 0 ) - { - rc = user->set_auth_driver(new_auth, error_str); - } - if ( rc == 0 ) { pool->update(user); diff --git a/src/um/User.cc b/src/um/User.cc index 6376d9bb0d..77e68ccc0a 100644 --- a/src/um/User.cc +++ b/src/um/User.cc @@ -338,6 +338,32 @@ int User::split_secret(const string secret, string& user, string& pass) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +int User::set_password(const string& passwd, string& error_str) +{ + int rc = 0; + + if (pass_is_valid(passwd, error_str)) + { + if (auth_driver == UserPool::CORE_AUTH) + { + password = one_util::sha1_digest(passwd); + } + else + { + password = passwd; + } + + invalidate_session(); + } + else + { + rc = -1; + } + + return rc; +}; + + bool User::pass_is_valid(const string& pass, string& error_str) { if ( pass.empty() ) From 2e8dc65fd67ad9ecaae75b1098442bbf13a6b61d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 17:09:04 +0100 Subject: [PATCH 78/80] Bug #2503: Add rollback in case of error --- include/User.h | 1 - src/rm/RequestManagerUser.cc | 9 +++++++++ src/um/User.cc | 5 ++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/include/User.h b/include/User.h index 38c105d421..28df797816 100644 --- a/include/User.h +++ b/include/User.h @@ -21,7 +21,6 @@ #include "UserTemplate.h" #include "ObjectCollection.h" #include "QuotasSQL.h" -#include "NebulaUtil.h" class UserQuotas; diff --git a/src/rm/RequestManagerUser.cc b/src/rm/RequestManagerUser.cc index e6ffa49635..0a98d057ae 100644 
--- a/src/rm/RequestManagerUser.cc +++ b/src/rm/RequestManagerUser.cc @@ -119,11 +119,20 @@ int UserChangeAuth::user_action(int user_id, return -1; } + string old_auth = user->get_auth_driver(); + rc = user->set_auth_driver(new_auth, error_str); if ( rc == 0 && !new_pass.empty() ) { rc = user->set_password(new_pass, error_str); + + if (rc != 0) + { + string tmp_str; + + user->set_auth_driver(old_auth, tmp_str); + } } if ( rc == 0 ) diff --git a/src/um/User.cc b/src/um/User.cc index 77e68ccc0a..2677be0b15 100644 --- a/src/um/User.cc +++ b/src/um/User.cc @@ -25,7 +25,7 @@ #include "User.h" #include "Nebula.h" #include "Group.h" - +#include "NebulaUtil.h" const string User::INVALID_NAME_CHARS = " :\t\n\v\f\r"; const string User::INVALID_PASS_CHARS = " \t\n\v\f\r"; @@ -314,6 +314,7 @@ int User::from_xml(const string& xml) return 0; } + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -363,6 +364,8 @@ int User::set_password(const string& passwd, string& error_str) return rc; }; +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ bool User::pass_is_valid(const string& pass, string& error_str) { From da8abafe72c4f1806aeecc17f4d09afc77b5cbdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADn?= Date: Tue, 4 Mar 2014 19:16:54 +0100 Subject: [PATCH 79/80] Fix bug in add_provider 'all' argument --- src/cli/one_helper.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb index 34440291f7..57929b6f3c 100644 --- a/src/cli/one_helper.rb +++ b/src/cli/one_helper.rb @@ -604,7 +604,7 @@ EOT end def self.name_to_id(name, pool, ename) - if ename=="CLUSTER" and name=="ALL" + if ename=="CLUSTER" and name.upcase=="ALL" return 0, "ALL" end From 368c5cf32309a1a5810ba41cf840aeba6bee8aea Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 5 Mar 2014 12:10:56 +0100 Subject: [PATCH 80/80] onegroup list crashed when it was unable to connect to oned --- src/cli/one_helper/onegroup_helper.rb | 46 +++++++++++++++------------ 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/src/cli/one_helper/onegroup_helper.rb b/src/cli/one_helper/onegroup_helper.rb index a250d3f082..b011b9fd81 100644 --- a/src/cli/one_helper/onegroup_helper.rb +++ b/src/cli/one_helper/onegroup_helper.rb @@ -66,21 +66,27 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper def format_pool(options) config_file = self.class.table_conf - prefix = '/GROUP_POOL/DEFAULT_GROUP_QUOTAS/' - group_pool = @group_pool - - quotas = group_pool.get_hash()['GROUP_POOL']['QUOTAS'] - quotas_hash = Hash.new - - if (!quotas.nil?) - quotas = [quotas].flatten - - quotas.each do |q| - quotas_hash[q['ID']] = q - end - end - table = CLIHelper::ShowTable.new(config_file, self) do + def pool_default_quotas(path) + @data.dsearch('/GROUP_POOL/DEFAULT_GROUP_QUOTAS/'+path) + end + + def quotas + if !defined?(@quotas) + quotas = @data.dsearch('GROUP_POOL/QUOTAS') + @quotas = Hash.new + + if (!quotas.nil?) 
+ quotas = [quotas].flatten + + quotas.each do |q| + @quotas[q['ID']] = q + end + end + end + @quotas + end + column :ID, "ONE identifier for the Group", :size=>4 do |d| d["ID"] end @@ -104,11 +110,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper column :VMS , "Number of VMS", :size=>9 do |d| begin - q = quotas_hash[d['ID']] + q = quotas[d['ID']] limit = q['VM_QUOTA']['VM']["VMS"] if limit == "-1" - limit = group_pool["#{prefix}VM_QUOTA/VM/VMS"] + limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/VMS"] limit = "0" if limit.nil? || limit == "" end @@ -121,11 +127,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper column :MEMORY, "Total memory allocated to user VMs", :size=>17 do |d| begin - q = quotas_hash[d['ID']] + q = quotas[d['ID']] limit = q['VM_QUOTA']['VM']["MEMORY"] if limit == "-1" - limit = group_pool["#{prefix}VM_QUOTA/VM/MEMORY"] + limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/MEMORY"] limit = "0" if limit.nil? || limit == "" end @@ -139,11 +145,11 @@ class OneGroupHelper < OpenNebulaHelper::OneHelper column :CPU, "Total CPU allocated to user VMs", :size=>11 do |d| begin - q = quotas_hash[d['ID']] + q = quotas[d['ID']] limit = q['VM_QUOTA']['VM']["CPU"] if limit == "-1" - limit = group_pool["#{prefix}VM_QUOTA/VM/CPU"] + limit = pool_default_quotas["#{prefix}VM_QUOTA/VM/CPU"] limit = "0" if limit.nil? || limit == "" end