From 977fe907b2e04a7280a859639dda235bbf76a0a4 Mon Sep 17 00:00:00 2001 From: Stefan Kooman Date: Thu, 29 Jun 2017 11:10:39 +0200 Subject: [PATCH 01/21] Add support for CEPH_KEY datastore attribute to be able to authenticate to a Ceph cluster. See https://dev.opennebula.org/issues/5208 --- src/datastore_mad/remotes/ceph/clone | 6 ++++++ src/datastore_mad/remotes/ceph/cp | 6 ++++++ src/datastore_mad/remotes/ceph/export | 9 +++++++++ src/datastore_mad/remotes/ceph/mkfs | 6 ++++++ src/datastore_mad/remotes/ceph/monitor | 6 ++++++ src/datastore_mad/remotes/ceph/rm | 6 ++++++ src/datastore_mad/remotes/ceph/snap_delete | 6 ++++++ src/datastore_mad/remotes/ceph/snap_flatten | 6 ++++++ src/datastore_mad/remotes/ceph/snap_revert | 6 ++++++ src/oca/java/test/oned.conf | 1 + src/tm_mad/ceph/clone | 6 ++++++ src/tm_mad/ceph/cpds | 6 ++++++ src/tm_mad/ceph/delete | 6 ++++++ src/tm_mad/ceph/ln | 6 ++++++ src/tm_mad/ceph/mkimage | 6 ++++++ src/tm_mad/ceph/monitor | 6 ++++++ src/tm_mad/ceph/resize | 6 ++++++ src/tm_mad/ceph/snap_create | 6 ++++++ src/tm_mad/ceph/snap_create_live | 6 ++++++ src/tm_mad/ceph/snap_delete | 6 ++++++ src/tm_mad/ceph/snap_revert | 2 ++ 21 files changed, 120 insertions(+) diff --git a/src/datastore_mad/remotes/ceph/clone b/src/datastore_mad/remotes/ceph/clone index 95479d6c98..b0874612c0 100755 --- a/src/datastore_mad/remotes/ceph/clone +++ b/src/datastore_mad/remotes/ceph/clone @@ -51,6 +51,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/SIZE \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -62,6 +63,7 @@ RBD_FORMAT="${XPATH_ELEMENTS[i++]:-$RBD_FORMAT}" SRC="${XPATH_ELEMENTS[i++]}" SIZE="${XPATH_ELEMENTS[i++]}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -75,6 +77,10 @@ if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi diff --git a/src/datastore_mad/remotes/ceph/cp b/src/datastore_mad/remotes/ceph/cp index a4c38395c6..b7615f4eae 100755 --- a/src/datastore_mad/remotes/ceph/cp +++ b/src/datastore_mad/remotes/ceph/cp @@ -64,6 +64,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/NO_DECOMPRESS \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/LIMIT_TRANSFER_BW \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -82,6 +83,7 @@ SHA1="${XPATH_ELEMENTS[i++]}" NO_DECOMPRESS="${XPATH_ELEMENTS[i++]}" LIMIT_TRANSFER_BW="${XPATH_ELEMENTS[i++]}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -95,6 +97,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/datastore_mad/remotes/ceph/export b/src/datastore_mad/remotes/ceph/export index 0639115f68..51b969fef5 100755 --- a/src/datastore_mad/remotes/ceph/export +++ b/src/datastore_mad/remotes/ceph/export @@ -53,6 +53,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \ 
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -65,6 +66,7 @@ FORMAT="${XPATH_ELEMENTS[i++]:-raw}" BRIDGE_LIST="${XPATH_ELEMENTS[i++]}" POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -81,11 +83,18 @@ if [ -n "$CEPH_USER" ]; then IMPORT_SOURCE="${IMPORT_SOURCE}?CEPH_USER=${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" + IMPORT_SOURCE="${IMPORT_SOURCE}?CEPH_USER=${CEPH_USER}?CEPH_KEY=${CEPH_KEY}?" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" if [ -n "$CEPH_USER" ]; then IMPORT_SOURCE="${IMPORT_SOURCE}&" + elif [ -n "$CEPH_KEY" ]; then + IMPORT_SOURCE="${IMPORT_SOURCE}&" else IMPORT_SOURCE="${IMPORT_SOURCE}?" fi diff --git a/src/datastore_mad/remotes/ceph/mkfs b/src/datastore_mad/remotes/ceph/mkfs index 5fe7897593..37cd732b7d 100755 --- a/src/datastore_mad/remotes/ceph/mkfs +++ b/src/datastore_mad/remotes/ceph/mkfs @@ -56,6 +56,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ /DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \ /DS_DRIVER_ACTION_DATA/IMAGE/SIZE \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -70,6 +71,7 @@ RBD_FORMAT="${XPATH_ELEMENTS[i++]:-$RBD_FORMAT}" FSTYPE="${XPATH_ELEMENTS[i++]}" SIZE="${XPATH_ELEMENTS[i++]}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -83,6 +85,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/datastore_mad/remotes/ceph/monitor b/src/datastore_mad/remotes/ceph/monitor index 5665c00705..9bba67f6fc 100755 --- a/src/datastore_mad/remotes/ceph/monitor +++ b/src/datastore_mad/remotes/ceph/monitor @@ -49,11 +49,13 @@ while IFS= read -r -d '' element; do done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) BRIDGE_LIST="${XPATH_ELEMENTS[j++]}" POOL_NAME="${XPATH_ELEMENTS[j++]:-$POOL_NAME}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" HOST=`get_destination_host` @@ -67,6 +69,10 @@ if [ -n "$CEPH_USER" ]; then CEPH="$CEPH --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then CEPH="$CEPH --conf ${CEPH_CONF}" RADOS="$RADOS --conf ${CEPH_CONF}" diff --git a/src/datastore_mad/remotes/ceph/rm b/src/datastore_mad/remotes/ceph/rm index 391712f2d4..5f364de773 100755 --- a/src/datastore_mad/remotes/ceph/rm +++ b/src/datastore_mad/remotes/ceph/rm @@ -49,11 +49,13 @@ while IFS= read -r -d '' element; do done < <($XPATH /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + 
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) RBD_SRC="${XPATH_ELEMENTS[j++]}" BRIDGE_LIST="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" DST_HOST=`get_destination_host $ID` @@ -67,6 +69,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/datastore_mad/remotes/ceph/snap_delete b/src/datastore_mad/remotes/ceph/snap_delete index 2427d7bfc5..b29995ce13 100755 --- a/src/datastore_mad/remotes/ceph/snap_delete +++ b/src/datastore_mad/remotes/ceph/snap_delete @@ -52,6 +52,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \ /DS_DRIVER_ACTION_DATA/IMAGE/TARGET_SNAPSHOT \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -61,6 +62,7 @@ POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" RBD_SRC="${XPATH_ELEMENTS[i++]}" SNAP_ID="${XPATH_ELEMENTS[i++]}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -74,6 +76,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/datastore_mad/remotes/ceph/snap_flatten b/src/datastore_mad/remotes/ceph/snap_flatten index 3befc0e59c..a0d0e6c12b 100755 --- a/src/datastore_mad/remotes/ceph/snap_flatten +++ b/src/datastore_mad/remotes/ceph/snap_flatten @@ -52,6 +52,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \ /DS_DRIVER_ACTION_DATA/IMAGE/TARGET_SNAPSHOT \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -61,6 +62,7 @@ POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" RBD_SRC="${XPATH_ELEMENTS[i++]}" SNAP_ID="${XPATH_ELEMENTS[i++]}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -74,6 +76,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/datastore_mad/remotes/ceph/snap_revert b/src/datastore_mad/remotes/ceph/snap_revert index f0eef03176..204115a3d6 100755 --- a/src/datastore_mad/remotes/ceph/snap_revert +++ b/src/datastore_mad/remotes/ceph/snap_revert @@ -52,6 +52,7 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \ /DS_DRIVER_ACTION_DATA/IMAGE/TARGET_SNAPSHOT \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) unset i @@ -61,6 +62,7 @@ POOL_NAME="${XPATH_ELEMENTS[i++]:-$POOL_NAME}" RBD_SRC="${XPATH_ELEMENTS[i++]}" SNAP_ID="${XPATH_ELEMENTS[i++]}" CEPH_USER="${XPATH_ELEMENTS[i++]}" +CEPH_KEY="${XPATH_ELEMENTS[i++]}" CEPH_CONF="${XPATH_ELEMENTS[i++]}" DST_HOST=`get_destination_host $ID` @@ -74,6 +76,10 @@ if [ -n 
"$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/oca/java/test/oned.conf b/src/oca/java/test/oned.conf index c8f6851e63..95f732c096 100644 --- a/src/oca/java/test/oned.conf +++ b/src/oca/java/test/oned.conf @@ -817,6 +817,7 @@ VNET_RESTRICTED_ATTR = "AR/BRIDGE" INHERIT_DATASTORE_ATTR = "CEPH_HOST" INHERIT_DATASTORE_ATTR = "CEPH_SECRET" INHERIT_DATASTORE_ATTR = "CEPH_USER" +INHERIT_DATASTORE_ATTR = "CEPH_KEY" INHERIT_DATASTORE_ATTR = "CEPH_CONF" INHERIT_DATASTORE_ATTR = "POOL_NAME" diff --git a/src/tm_mad/ceph/clone b/src/tm_mad/ceph/clone index 47b4d8bd74..fd1507da0f 100755 --- a/src/tm_mad/ceph/clone +++ b/src/tm_mad/ceph/clone @@ -67,11 +67,13 @@ while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SIZE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/ORIGINAL_SIZE) CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" SIZE="${XPATH_ELEMENTS[j++]}" ORIGINAL_SIZE="${XPATH_ELEMENTS[j++]}" @@ -84,6 +86,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/cpds b/src/tm_mad/ceph/cpds index d0ec85632e..8c93696be7 100755 --- a/src/tm_mad/ceph/cpds +++ b/src/tm_mad/ceph/cpds @@ -69,11 +69,13 @@ done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF) RBD_SRC="${XPATH_ELEMENTS[j++]}" CLONE="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" #------------------------------------------------------------------------------- @@ -90,6 +92,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/delete b/src/tm_mad/ceph/delete index 5214ef274c..1189dcac0b 100755 --- a/src/tm_mad/ceph/delete +++ b/src/tm_mad/ceph/delete @@ -66,12 +66,14 @@ if [ `is_disk $DST_PATH` -eq 0 ]; then /DATASTORE/TEMPLATE/SOURCE \ /DATASTORE/TEMPLATE/CLONE \ /DATASTORE/TEMPLATE/CEPH_USER \ + /DATASTORE/TEMPLATE/CEPH_KEY \ /DATASTORE/TEMPLATE/CEPH_CONF \ /DATASTORE/TEMPLATE/POOL_NAME) SRC="${XPATH_ELEMENTS[j++]}" CLONE="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" + CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" POOL_NAME="${XPATH_ELEMENTS[j++]:-$POOL_NAME}" @@ -79,6 +81,10 @@ if [ `is_disk $DST_PATH` -eq 0 ]; then RBD="$RBD --id ${CEPH_USER}" fi + if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" + fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/ln b/src/tm_mad/ceph/ln index df8029d8b9..669eaeab1a 100755 --- a/src/tm_mad/ceph/ln +++ b/src/tm_mad/ceph/ln @@ -65,9 +65,11 @@ while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <(onevm show -x $VM_ID| $XPATH \ 
/VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF) CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" #------------------------------------------------------------------------------- @@ -78,6 +80,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/mkimage b/src/tm_mad/ceph/mkimage index f01bb71711..e2c069d973 100755 --- a/src/tm_mad/ceph/mkimage +++ b/src/tm_mad/ceph/mkimage @@ -72,11 +72,13 @@ while IFS= read -r -d '' element; do XPATH_ELEMENTS[i++]="$element" done < <(onevm show -x $VMID | $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/POOL_NAME \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/RBD_FORMAT) CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" POOL_NAME="${XPATH_ELEMENTS[j++]:-$POOL_NAME}" RBD_FORMAT="${XPATH_ELEMENTS[j++]:-$RBD_FORMAT}" @@ -85,6 +87,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/monitor b/src/tm_mad/ceph/monitor index 505572cfff..e151b26529 100755 --- a/src/tm_mad/ceph/monitor +++ b/src/tm_mad/ceph/monitor @@ -49,11 +49,13 @@ while IFS= read -r -d '' element; do done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BRIDGE_LIST \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/POOL_NAME \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \ /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_CONF) BRIDGE_LIST="${XPATH_ELEMENTS[j++]}" POOL_NAME="${XPATH_ELEMENTS[j++]:-$POOL_NAME}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" HOST=`get_destination_host` @@ -67,6 +69,10 @@ if [ -n "$CEPH_USER" ]; then CEPH="$CEPH --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then CEPH="$CEPH --conf ${CEPH_CONF}" RADOS="$RADOS --conf ${CEPH_CONF}" diff --git a/src/tm_mad/ceph/resize b/src/tm_mad/ceph/resize index 03bd8c185c..572b7ea544 100755 --- a/src/tm_mad/ceph/resize +++ b/src/tm_mad/ceph/resize @@ -61,11 +61,13 @@ while IFS= read -r -d '' element; do done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/PERSISTENT) RBD_SRC="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" PERSISTENT="${XPATH_ELEMENTS[j++]}" @@ -83,6 +85,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/snap_create b/src/tm_mad/ceph/snap_create index f603e1b817..e4f09e4f0c 100755 --- a/src/tm_mad/ceph/snap_create +++ b/src/tm_mad/ceph/snap_create @@ -63,11 +63,13 @@ done < <(onevm show -x 
$VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF) RBD_SRC="${XPATH_ELEMENTS[j++]}" CLONE="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" if [ "$CLONE" = "NO" ]; then @@ -84,6 +86,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/snap_create_live b/src/tm_mad/ceph/snap_create_live index aceae44a5c..ec645b610e 100755 --- a/src/tm_mad/ceph/snap_create_live +++ b/src/tm_mad/ceph/snap_create_live @@ -65,12 +65,14 @@ done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF) DEPLOY_ID="${XPATH_ELEMENTS[j++]}" RBD_SRC="${XPATH_ELEMENTS[j++]}" CLONE="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" if [ "$CLONE" = "NO" ]; then @@ -87,6 +89,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/snap_delete b/src/tm_mad/ceph/snap_delete index 63c3c86665..7b7e0c16bf 100755 --- a/src/tm_mad/ceph/snap_delete +++ b/src/tm_mad/ceph/snap_delete @@ -63,11 +63,13 @@ done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF) RBD_SRC="${XPATH_ELEMENTS[j++]}" CLONE="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" if [ "$CLONE" = "NO" ]; then @@ -86,6 +88,10 @@ if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" fi +if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" +fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi diff --git a/src/tm_mad/ceph/snap_revert b/src/tm_mad/ceph/snap_revert index aa58a156de..f65f7b7cfd 100755 --- a/src/tm_mad/ceph/snap_revert +++ b/src/tm_mad/ceph/snap_revert @@ -63,11 +63,13 @@ done < <(onevm show -x $VM_ID| $XPATH \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_USER \ + /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_KEY \ /VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CEPH_CONF) RBD_SRC="${XPATH_ELEMENTS[j++]}" CLONE="${XPATH_ELEMENTS[j++]}" CEPH_USER="${XPATH_ELEMENTS[j++]}" +CEPH_KEY="${XPATH_ELEMENTS[j++]}" CEPH_CONF="${XPATH_ELEMENTS[j++]}" if [ "$CLONE" = "NO" ]; then From 2d57768707358ebf6773a207e735420952f06fa2 Mon Sep 17 00:00:00 2001 From: mcabrerizo Date: Thu, 29 Jun 2017 15:29:39 +0200 Subject: [PATCH 02/21] F #5207: Fix vCenter tm_resize --- src/tm_mad/vcenter/resize | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tm_mad/vcenter/resize b/src/tm_mad/vcenter/resize index 687df99bda..e2c488d97c 100755 --- a/src/tm_mad/vcenter/resize +++ 
b/src/tm_mad/vcenter/resize @@ -68,13 +68,13 @@ begin # Get disk element to be resized disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID=#{disk_id}]").first - if disk["ORIGINAL_SIZE"].to_i >= new_size.to_i + if disk["ORIGINAL_SIZE"] && disk["ORIGINAL_SIZE"].to_i >= new_size.to_i raise "'disk-resize' cannot decrease the disk's size" end # Resize operation - if !disk["OPENNEBULA_MANAGED"] || disk["OPENNEBULA_MANAGED"].downcase != "no" && - vm.resize_managed_disk(disk,new_size) + if !disk["OPENNEBULA_MANAGED"] || disk["OPENNEBULA_MANAGED"].downcase != "no" + vm.resize_managed_disk(disk, new_size) else vm.resize_unmanaged_disk(disk, new_size) end From d289fb8f0492258a0f2570fd0e6daae1ef55207a Mon Sep 17 00:00:00 2001 From: abelCoronado93 Date: Thu, 29 Jun 2017 12:24:11 +0200 Subject: [PATCH 03/21] F #4816 Make configurable a persistent instance done --- src/sunstone/etc/sunstone-views/admin.yaml | 5 ++- .../etc/sunstone-views/admin_vcenter.yaml | 5 ++- src/sunstone/etc/sunstone-views/cloud.yaml | 5 ++- .../etc/sunstone-views/cloud_vcenter.yaml | 5 ++- .../etc/sunstone-views/groupadmin.yaml | 5 ++- .../sunstone-views/groupadmin_vcenter.yaml | 5 ++- src/sunstone/etc/sunstone-views/user.yaml | 5 ++- src/sunstone/public/app/sunstone-config.js | 2 +- src/sunstone/public/app/tabs/provision-tab.js | 6 ++- .../app/tabs/provision-tab/vms/create.hbs | 2 + .../wizard-tabs/general/capacity-inputs.js | 2 +- .../templates-tab/form-panels/instantiate.js | 37 +++++++++++-------- .../form-panels/instantiate/html.hbs | 2 + 13 files changed, 60 insertions(+), 26 deletions(-) diff --git a/src/sunstone/etc/sunstone-views/admin.yaml b/src/sunstone/etc/sunstone-views/admin.yaml index 40188e0d0d..e8f8291f51 100644 --- a/src/sunstone/etc/sunstone-views/admin.yaml +++ b/src/sunstone/etc/sunstone-views/admin.yaml @@ -51,7 +51,10 @@ features: instantiate_hide_cpu: false # False to not scale the CPU. Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will # deployed to diff --git a/src/sunstone/etc/sunstone-views/admin_vcenter.yaml b/src/sunstone/etc/sunstone-views/admin_vcenter.yaml index 89feae0475..c0ed051a49 100644 --- a/src/sunstone/etc/sunstone-views/admin_vcenter.yaml +++ b/src/sunstone/etc/sunstone-views/admin_vcenter.yaml @@ -51,7 +51,10 @@ features: instantiate_hide_cpu: false # False to not scale the CPU. Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will # deployed to diff --git a/src/sunstone/etc/sunstone-views/cloud.yaml b/src/sunstone/etc/sunstone-views/cloud.yaml index 94da408af1..00b6cbeba8 100644 --- a/src/sunstone/etc/sunstone-views/cloud.yaml +++ b/src/sunstone/etc/sunstone-views/cloud.yaml @@ -19,7 +19,10 @@ features: instantiate_hide_cpu: false # False to not scale the CPU. 
Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true tabs: provision-tab: panel_tabs: diff --git a/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml b/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml index f293350afd..e80cd76784 100644 --- a/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml +++ b/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml @@ -19,7 +19,10 @@ features: instantiate_hide_cpu: true # False to not scale the CPU. Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true tabs: provision-tab: panel_tabs: diff --git a/src/sunstone/etc/sunstone-views/groupadmin.yaml b/src/sunstone/etc/sunstone-views/groupadmin.yaml index e9e4f85a9c..3e6a3abf5f 100644 --- a/src/sunstone/etc/sunstone-views/groupadmin.yaml +++ b/src/sunstone/etc/sunstone-views/groupadmin.yaml @@ -51,7 +51,10 @@ features: instantiate_hide_cpu: false # False to not scale the CPU. Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will # deployed to diff --git a/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml b/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml index e1162f2644..c58ad92c94 100644 --- a/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml +++ b/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml @@ -51,7 +51,10 @@ features: instantiate_hide_cpu: true # False to not scale the CPU. Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will # deployed to diff --git a/src/sunstone/etc/sunstone-views/user.yaml b/src/sunstone/etc/sunstone-views/user.yaml index 4da1924150..aa165c3e24 100644 --- a/src/sunstone/etc/sunstone-views/user.yaml +++ b/src/sunstone/etc/sunstone-views/user.yaml @@ -51,7 +51,10 @@ features: instantiate_hide_cpu: false # False to not scale the CPU. 
Number [0, 1] to scale from VCPU - instanciate_cpu_factor: false + instantiate_cpu_factor: false + + # True to show the option to make persistent a instance + instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will # deployed to diff --git a/src/sunstone/public/app/sunstone-config.js b/src/sunstone/public/app/sunstone-config.js index 585499a36d..d624d16dcd 100644 --- a/src/sunstone/public/app/sunstone-config.js +++ b/src/sunstone/public/app/sunstone-config.js @@ -156,7 +156,7 @@ define(function(require) { 'enabledTabs': _config['view']['enabled_tabs'], 'onedConf': _config['oned_conf'], 'confirmVMActions': _config['view']['confirm_vms'], - 'scaleFactor': _config['view']['features']['instanciate_cpu_factor'], + 'scaleFactor': _config['view']['features']['instantiate_cpu_factor'], 'filterView': _config['view']['filter_view'], "allTabs": function() { diff --git a/src/sunstone/public/app/tabs/provision-tab.js b/src/sunstone/public/app/tabs/provision-tab.js index 5bf8b08d7e..30638bdd61 100644 --- a/src/sunstone/public/app/tabs/provision-tab.js +++ b/src/sunstone/public/app/tabs/provision-tab.js @@ -961,10 +961,14 @@ define(function(require) { disksContext.data("template_json", template_json); if (Config.provision.create_vm.isEnabled("disk_resize")) { + var pers = $("input.instantiate_pers", create_vm_context).prop("checked"); + if(pers == undefined){ + pers = false; + } DisksResize.insert({ template_json: template_json, disksContext: disksContext, - force_persistent: $("input.instantiate_pers", create_vm_context).prop("checked"), + force_persistent: pers, cost_callback: _calculateCost }); } else { diff --git a/src/sunstone/public/app/tabs/provision-tab/vms/create.hbs b/src/sunstone/public/app/tabs/provision-tab/vms/create.hbs index 654186edb0..7f2fd3ac8f 100644 --- a/src/sunstone/public/app/tabs/provision-tab/vms/create.hbs +++ b/src/sunstone/public/app/tabs/provision-tab/vms/create.hbs @@ -29,6 +29,7 @@
+ {{#isFeatureEnabled "instantiate_persistent"}}
@@ -41,6 +42,7 @@ {{{tip (tr "Creates a private persistent copy of the template plus any image defined in DISK, and instantiates that copy")}}}
+ {{/isFeatureEnabled}}
diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/capacity-inputs.js b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/capacity-inputs.js index deec5be508..b98ce418e9 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/capacity-inputs.js +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/capacity-inputs.js @@ -135,7 +135,7 @@ define(function(require) { $("div.vcpu_input", context).html(input); - if (Config.isFeatureEnabled("instanciate_cpu_factor")){ + if (Config.isFeatureEnabled("instantiate_cpu_factor")){ $("div.vcpu_input input", context).on("change", function(){ var vcpuValue = $("div.vcpu_input input", context).val(); $("div.cpu_input input", context).val(vcpuValue * Config.scaleFactor); diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate.js b/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate.js index 8c97044699..31d7087fe4 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate.js +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate.js @@ -88,26 +88,31 @@ define(function(require) { function _setup(context) { var that = this; - $("input.instantiate_pers", context).on("change", function(){ - var persistent = $(this).prop('checked'); + if(Config.isFeatureEnabled("instantiate_persistent")){ + $("input.instantiate_pers", context).on("change", function(){ + var persistent = $(this).prop('checked'); - if(persistent){ - $("#vm_n_times_disabled", context).show(); - $("#vm_n_times", context).hide(); - } else { - $("#vm_n_times_disabled", context).hide(); - $("#vm_n_times", context).show(); - } + if(persistent){ + $("#vm_n_times_disabled", context).show(); + $("#vm_n_times", context).hide(); + } else { + $("#vm_n_times_disabled", context).hide(); + $("#vm_n_times", context).show(); + } - $.each(that.template_objects, function(index, template_json) { - DisksResize.insert({ - template_json: template_json, - disksContext: $(".disksContext" + template_json.VMTEMPLATE.ID, context), - force_persistent: persistent, - cost_callback: that.calculateCost.bind(that) + $.each(that.template_objects, function(index, template_json) { + DisksResize.insert({ + template_json: template_json, + disksContext: $(".disksContext" + template_json.VMTEMPLATE.ID, context), + force_persistent: persistent, + cost_callback: that.calculateCost.bind(that) + }); }); }); - }); + } else { + $("#vm_n_times_disabled", context).hide(); + $("#vm_n_times", context).show(); + } } function _calculateCost(){ diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs b/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs index 651bd4faa3..8c786f5744 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/instantiate/html.hbs @@ -21,6 +21,7 @@
+ {{#isFeatureEnabled "instantiate_persistent"}}
+ {{/isFeatureEnabled}}
From 9e29673c7df30a7b9e70b8e0d7f68097a0961706 Mon Sep 17 00:00:00 2001 From: abelCoronado93 Date: Thu, 29 Jun 2017 12:53:51 +0200 Subject: [PATCH 04/21] Changed comments --- src/sunstone/etc/sunstone-views/admin.yaml | 2 +- src/sunstone/etc/sunstone-views/admin_vcenter.yaml | 2 +- src/sunstone/etc/sunstone-views/cloud.yaml | 2 +- src/sunstone/etc/sunstone-views/cloud_vcenter.yaml | 2 +- src/sunstone/etc/sunstone-views/groupadmin.yaml | 2 +- src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml | 2 +- src/sunstone/etc/sunstone-views/user.yaml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/sunstone/etc/sunstone-views/admin.yaml b/src/sunstone/etc/sunstone-views/admin.yaml index e8f8291f51..7bc5086441 100644 --- a/src/sunstone/etc/sunstone-views/admin.yaml +++ b/src/sunstone/etc/sunstone-views/admin.yaml @@ -53,7 +53,7 @@ features: # False to not scale the CPU. Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will diff --git a/src/sunstone/etc/sunstone-views/admin_vcenter.yaml b/src/sunstone/etc/sunstone-views/admin_vcenter.yaml index c0ed051a49..da99ca650c 100644 --- a/src/sunstone/etc/sunstone-views/admin_vcenter.yaml +++ b/src/sunstone/etc/sunstone-views/admin_vcenter.yaml @@ -53,7 +53,7 @@ features: # False to not scale the CPU. Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will diff --git a/src/sunstone/etc/sunstone-views/cloud.yaml b/src/sunstone/etc/sunstone-views/cloud.yaml index 00b6cbeba8..e0399756d4 100644 --- a/src/sunstone/etc/sunstone-views/cloud.yaml +++ b/src/sunstone/etc/sunstone-views/cloud.yaml @@ -21,7 +21,7 @@ features: # False to not scale the CPU. Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true tabs: provision-tab: diff --git a/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml b/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml index e80cd76784..133058f3d3 100644 --- a/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml +++ b/src/sunstone/etc/sunstone-views/cloud_vcenter.yaml @@ -21,7 +21,7 @@ features: # False to not scale the CPU. Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true tabs: provision-tab: diff --git a/src/sunstone/etc/sunstone-views/groupadmin.yaml b/src/sunstone/etc/sunstone-views/groupadmin.yaml index 3e6a3abf5f..d5bd632345 100644 --- a/src/sunstone/etc/sunstone-views/groupadmin.yaml +++ b/src/sunstone/etc/sunstone-views/groupadmin.yaml @@ -53,7 +53,7 @@ features: # False to not scale the CPU. 
Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will diff --git a/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml b/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml index c58ad92c94..10bcdde219 100644 --- a/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml +++ b/src/sunstone/etc/sunstone-views/groupadmin_vcenter.yaml @@ -53,7 +53,7 @@ features: # False to not scale the CPU. Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will diff --git a/src/sunstone/etc/sunstone-views/user.yaml b/src/sunstone/etc/sunstone-views/user.yaml index aa165c3e24..f3a521c4b3 100644 --- a/src/sunstone/etc/sunstone-views/user.yaml +++ b/src/sunstone/etc/sunstone-views/user.yaml @@ -53,7 +53,7 @@ features: # False to not scale the CPU. Number [0, 1] to scale from VCPU instantiate_cpu_factor: false - # True to show the option to make persistent a instance + # True to show the option to make an instance persistent instantiate_persistent: true # True to show an input to specify the the VMs and Template path/folder where a vCenter VM will From 35a67abf3b96e08bef103dbc9a22a5ed9f5a8c1c Mon Sep 17 00:00:00 2001 From: abelCoronado93 Date: Thu, 29 Jun 2017 12:47:49 +0200 Subject: [PATCH 05/21] Solved bug in dashboard accounting --- src/sunstone/public/app/utils/accounting.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/sunstone/public/app/utils/accounting.js b/src/sunstone/public/app/utils/accounting.js index 139eb03056..c4812a9462 100644 --- a/src/sunstone/public/app/utils/accounting.js +++ b/src/sunstone/public/app/utils/accounting.js @@ -467,6 +467,10 @@ define(function(require) { var template = history.VM.TEMPLATE; + if(!template){ + break; + } + // --- cpu --- var val = parseFloat(template.CPU) * n_hours; From 87b5e5cb5b37ad0dfdd4fc5a5c9c5e87ded26f27 Mon Sep 17 00:00:00 2001 From: "Ruben S. 
Montero" Date: Thu, 29 Jun 2017 19:47:56 +0200 Subject: [PATCH 06/21] F #4809: Re-design replicated log structure --- include/FedReplicaManager.h | 112 +++----------- include/LogDB.h | 74 ++++++++- src/nebula/Nebula.cc | 3 +- src/raft/FedReplicaManager.cc | 277 ++++++---------------------------- src/raft/RaftManager.cc | 7 +- src/raft/ReplicaThread.cc | 1 + src/rm/RequestManagerZone.cc | 10 +- src/sql/LogDB.cc | 155 +++++++++++++++++-- 8 files changed, 287 insertions(+), 352 deletions(-) diff --git a/include/FedReplicaManager.h b/include/FedReplicaManager.h index 1e3a4d633c..59c135b6fb 100644 --- a/include/FedReplicaManager.h +++ b/include/FedReplicaManager.h @@ -26,38 +26,37 @@ extern "C" void * frm_loop(void *arg); -class SqlDB; +class LogDB; +class LogDBRecord; class FedReplicaManager : public ReplicaManager, ActionListener { public: /** - * @param _t timer for periofic actions (sec) - * @param _p purge timeout for log * @param d pointer to underlying DB (LogDB) - * @param l log_retention length (num records) */ - FedReplicaManager(time_t _t, time_t _p, SqlDB * d, unsigned int l); + FedReplicaManager(LogDB * d); virtual ~FedReplicaManager(); /** - * Creates a new record in the federation log and sends the replication - * event to the replica threads. [MASTER] - * @param sql db command to replicate - * @return 0 on success -1 otherwise + * Sends the replication event to the replica threads. [MASTER] */ - int replicate(const std::string& sql); + void replicate(const std::string& sql) + { + ReplicaManager::replicate(); + } /** * Updates the current index in the server and applies the command to the * server. It also stores the record in the zone log [SLAVE] * @param index of the record + * @param prev of index preceding this one * @param sql command to apply to DB * @return 0 on success, last_index if missing records, -1 on DB error */ - int apply_log_record(int index, const std::string& sql); + int apply_log_record(int index, int prev, const std::string& sql); /** * Record was successfully replicated on zone, increase next index and @@ -106,8 +105,6 @@ public: update_zones(zids); - get_last_index(last_index); - ReplicaManager::start_replica_threads(zids); } @@ -133,11 +130,6 @@ public: */ void delete_zone(int zone_id); - /** - * Bootstrap federated log - */ - static int bootstrap(SqlDB *_db); - /** * @return the id of fed. replica thread */ @@ -146,19 +138,6 @@ public: return frm_thread; }; - /** - * @return the last index of the fed log (from DB to use this method in - * HA followers) - */ - unsigned int get_last_index() const - { - unsigned int li; - - get_last_index(li); - - return li; - } - private: friend void * frm_loop(void *arg); @@ -177,29 +156,19 @@ private: */ pthread_mutex_t mutex; - //-------------------------------------------------------------------------- - // Timers - // - timer_period. Base timer to wake up the manager - // - purge_period. How often the replicated log is purged (600s) - // - xmlrpc_timeout. To timeout xml-rpc api calls to replicate log - //-------------------------------------------------------------------------- - time_t timer_period; - - time_t purge_period; - - static const time_t xmlrpc_timeout_ms; - // ------------------------------------------------------------------------- // Synchronization variables - // - last_index in the replication log + // - xmlrpc_timeout. 
To timeout xml-rpc api calls to replicate log // - zones list of zones in the federation with: // - list of servers // - next index to send to this zone // ------------------------------------------------------------------------- + static const time_t xmlrpc_timeout_ms; + struct ZoneServers { ZoneServers(int z, unsigned int l, const std::string& s): - zone_id(z), endpoint(s), next(l){}; + zone_id(z), endpoint(s), next(l), last(-1){}; ~ZoneServers(){}; @@ -207,16 +176,14 @@ private: std::string endpoint; - unsigned int next; + int next; + + int last; }; std::map zones; - unsigned int last_index; - - SqlDB * logdb; - - unsigned int log_retention; + LogDB * logdb; // ------------------------------------------------------------------------- // Action Listener interface @@ -228,45 +195,6 @@ private: */ void finalize_action(const ActionRequest& ar); - /** - * This function is executed periodically to purge the state log - */ - void timer_action(const ActionRequest& ar); - - // ------------------------------------------------------------------------- - // Database Implementation - // Store log records to replicate on federation slaves - // ------------------------------------------------------------------------- - static const char * table; - - static const char * db_names; - - static const char * db_bootstrap; - - /** - * Gets a record from the log - * @param index of the record - * @param sql command of the record - * @return 0 in case of success -1 otherwise - */ - int get_log_record(int index, std::string& sql); - - /** - * Inserts a new record in the log ans updates the last_index variable - * (memory and db) - * @param index of new record - * @param sql of DB command to execute - * @return 0 on success - */ - int insert_log_record(int index, const std::string& sql); - - /** - * Reads the last index from DB for initialization - * @param index - * @return 0 on success - */ - int get_last_index(unsigned int& index) const; - /** * Get the nest record to replicate in a zone * @param zone_id of the zone @@ -274,8 +202,8 @@ private: * @param sql command to replicate * @return 0 on success, -1 otherwise */ - int get_next_record(int zone_id, int& index, std::string& sql, - std::string& zservers); + int get_next_record(int zone_id, std::string& zedp, LogDBRecord& lr); + }; #endif /*FED_REPLICA_MANAGER_H_*/ diff --git a/include/LogDB.h b/include/LogDB.h index 8d8b51d75d..c16fc4643a 100644 --- a/include/LogDB.h +++ b/include/LogDB.h @@ -19,6 +19,7 @@ #include #include +#include #include "SqlDB.h" @@ -52,6 +53,12 @@ public: */ time_t timestamp; + /** + * The index in the federation, -1 if the log entry is not federated. + * At master fed_index is equal to index. + */ + int fed_index; + /** * Sets callback to load register from DB */ @@ -72,7 +79,7 @@ private: * This class implements a generic DB interface with replication. The associated * DB stores a log to replicate on followers. 
*/ -class LogDB : public SqlDB +class LogDB : public SqlDB, Callbackable { public: LogDB(SqlDB * _db, bool solo, unsigned int log_retention); @@ -111,11 +118,12 @@ public: * @param term for the record * @param sql command of the record * @param timestamp associated to this record + * @param fed_index index in the federation -1 if not federated * * @return -1 on failure, index of the inserted record on success */ int insert_log_record(unsigned int index, unsigned int term, - std::ostringstream& sql, time_t timestamp); + std::ostringstream& sql, time_t timestamp, int fed_index); //-------------------------------------------------------------------------- // Functions to manage the Raft state. Log record 0, term -1 @@ -148,7 +156,20 @@ public: * This function replicates the DB changes on followers before updating * the DB state */ - int exec_wr(ostringstream& cmd); + int exec_wr(ostringstream& cmd) + { + return _exec_wr(cmd, -1); + } + + int exec_federated_wr(ostringstream& cmd) + { + return _exec_wr(cmd, 0); + } + + int exec_federated_wr(ostringstream& cmd, int index) + { + return _exec_wr(cmd, index); + } int exec_local_wr(ostringstream& cmd) { @@ -201,6 +222,18 @@ public: */ void get_last_record_index(unsigned int& _i, unsigned int& _t); + // ------------------------------------------------------------------------- + // Federate log methods + // ------------------------------------------------------------------------- + /** + * Get last federated index, and previous + */ + int last_federated(); + + int previous_federated(int index); + + int next_federated(int index); + protected: int exec(std::ostringstream& cmd, Callbackable* obj, bool quiet) { @@ -245,6 +278,21 @@ private: */ unsigned int log_retention; + // ------------------------------------------------------------------------- + // Federated Log + // ------------------------------------------------------------------------- + /** + * The federated log stores a map with the federated log index and its + * corresponding local index. For the master both are the same + */ + std::set fed_log; + + /** + * Generates the federated index, it should be called whenever a server + * takes leadership. + */ + void build_federated_index(); + // ------------------------------------------------------------------------- // DataBase implementation // ------------------------------------------------------------------------- @@ -254,6 +302,20 @@ private: static const char * db_bootstrap; + /** + * Replicates writes in the followers and apply changes to DB state once + * it is safe to do so. + * + * @param federated -1 not federated (fed_index = -1), 0 generate fed index + * (fed_index = index), > 0 set (fed_index = federated) + */ + int _exec_wr(ostringstream& cmd, int federated); + + /** + * Callback to store the IDs of federated records in the federated log. + */ + int index_cb(void *null, int num, char **values, char **names); + /** * Applies the SQL command of the given record to the database. The * timestamp of the record is updated. @@ -267,10 +329,11 @@ private: * @param term for the log entry * @param sql command to modify DB state * @param ts timestamp of record application to DB state + * @param fi the federated index -1 if none * * @return 0 on success */ - int insert(int index, int term, const std::string& sql, time_t ts); + int insert(int index, int term, const std::string& sql, time_t ts, int fi); /** * Inserts a new log record in the database. 
If the record is successfully @@ -278,11 +341,12 @@ private: * @param term for the record * @param sql command of the record * @param timestamp associated to this record + * @param federated, if true it will set fed_index == index, -1 otherwise * * @return -1 on failure, index of the inserted record on success */ int insert_log_record(unsigned int term, std::ostringstream& sql, - time_t timestamp); + time_t timestamp, int federated); }; // ----------------------------------------------------------------------------- diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index 77db5129c1..6d9f1684f7 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -386,7 +386,6 @@ void Nebula::start(bool bootstrap_only) rc += SecurityGroupPool::bootstrap(logdb); rc += VirtualRouterPool::bootstrap(logdb); rc += VMGroupPool::bootstrap(logdb); - rc += FedReplicaManager::bootstrap(logdb); // Create the system tables only if bootstrap went well if (rc == 0) @@ -743,7 +742,7 @@ void Nebula::start(bool bootstrap_only) // ---- FedReplica Manager ---- try { - frm = new FedReplicaManager(timer_period,log_purge,logdb,log_retention); + frm = new FedReplicaManager(logdb); } catch (bad_alloc&) { diff --git a/src/raft/FedReplicaManager.cc b/src/raft/FedReplicaManager.cc index 86e20de635..ecb7f7f170 100644 --- a/src/raft/FedReplicaManager.cc +++ b/src/raft/FedReplicaManager.cc @@ -22,27 +22,16 @@ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -const char * FedReplicaManager::table = "fed_logdb"; - -const char * FedReplicaManager::db_names = "log_index, sqlcmd"; - -const char * FedReplicaManager::db_bootstrap = "CREATE TABLE IF NOT EXISTS " - "fed_logdb (log_index INTEGER PRIMARY KEY, sqlcmd MEDIUMTEXT)"; - const time_t FedReplicaManager::xmlrpc_timeout_ms = 10000; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -FedReplicaManager::FedReplicaManager(time_t _t, time_t _p, SqlDB * d, - unsigned int l): ReplicaManager(), timer_period(_t), purge_period(_p), - last_index(-1), logdb(d), log_retention(l) +FedReplicaManager::FedReplicaManager(LogDB * d): ReplicaManager(), logdb(d) { pthread_mutex_init(&mutex, 0); am.addListener(this); - - get_last_index(last_index); }; /* -------------------------------------------------------------------------- */ @@ -69,35 +58,16 @@ FedReplicaManager::~FedReplicaManager() /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int FedReplicaManager::replicate(const std::string& sql) -{ - pthread_mutex_lock(&mutex); - - if ( insert_log_record(last_index+1, sql) != 0 ) - { - pthread_mutex_unlock(&mutex); - return -1; - } - - last_index++; - - pthread_mutex_unlock(&mutex); - - ReplicaManager::replicate(); - - return 0; -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -int FedReplicaManager::apply_log_record(int index, const std::string& sql) +int FedReplicaManager::apply_log_record(int index, int prev, + const std::string& sql) { int rc; pthread_mutex_lock(&mutex); - if ( (unsigned int) index != last_index + 1 ) + int last_index = logdb->last_federated(); + + if ( prev != last_index ) { rc = last_index; @@ -105,40 +75,14 @@ int 
FedReplicaManager::apply_log_record(int index, const std::string& sql) return rc; } - std::ostringstream oss; + std::ostringstream oss(sql); - std::string * zsql = one_util::zlib_compress(sql, true); - - if ( zsql == 0 ) + if ( logdb->exec_federated_wr(oss, index) != 0 ) { pthread_mutex_unlock(&mutex); return -1; } - char * sql_db = logdb->escape_str(zsql->c_str()); - - delete zsql; - - if ( sql_db == 0 ) - { - pthread_mutex_unlock(&mutex); - return -1; - } - - oss << "BEGIN;\n" - << "REPLACE INTO " << table << " ("<< db_names <<") VALUES " - << "(" << last_index + 1 << ",'" << sql_db << "');\n" - << sql << ";\n" - << "END;"; - - if ( logdb->exec_wr(oss) != 0 ) - { - pthread_mutex_unlock(&mutex); - return -1; - } - - last_index++; - pthread_mutex_unlock(&mutex); return 0; @@ -160,7 +104,7 @@ extern "C" void * frm_loop(void *arg) NebulaLog::log("FRM",Log::INFO,"Federation Replica Manger started."); - fedrm->am.loop(fedrm->timer_period); + fedrm->am.loop(); NebulaLog::log("FRM",Log::INFO,"Federation Replica Manger stopped."); @@ -210,6 +154,8 @@ void FedReplicaManager::update_zones(std::vector& zone_ids) pthread_mutex_lock(&mutex); + int last_index = logdb->last_federated(); + zones.clear(); for (it = zone_ids.begin() ; it != zone_ids.end(); ) @@ -271,6 +217,8 @@ void FedReplicaManager::add_zone(int zone_id) pthread_mutex_lock(&mutex); + int last_index = logdb->last_federated(); + ZoneServers * zs = new ZoneServers(zone_id, last_index, zedp); zones.insert(make_pair(zone_id, zs)); @@ -325,58 +273,8 @@ ReplicaThread * FedReplicaManager::thread_factory(int zone_id) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -void FedReplicaManager::timer_action(const ActionRequest& ar) -{ - static int mark_tics = 0; - static int purge_tics = 0; - - mark_tics++; - purge_tics++; - - // Thread heartbeat - if ( (mark_tics * timer_period) >= 600 ) - { - NebulaLog::log("FRM",Log::INFO,"--Mark--"); - mark_tics = 0; - } - - // Database housekeeping - if ( (purge_tics * timer_period) >= purge_period ) - { - Nebula& nd = Nebula::instance(); - RaftManager * raftm = nd.get_raftm(); - - if ( raftm->is_leader() || raftm->is_solo() ) - { - NebulaLog::log("FRM", Log::INFO, "Purging federated log"); - - std::ostringstream oss; - - pthread_mutex_lock(&mutex); - - if ( last_index > log_retention ) - { - unsigned int delete_index = last_index - log_retention; - - // keep the last "log_retention" records - oss << "DELETE FROM fed_logdb WHERE log_index >= 0 AND " - << "log_index < " << delete_index; - - logdb->exec_wr(oss); - } - - pthread_mutex_unlock(&mutex); - } - - purge_tics = 0; - } -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -int FedReplicaManager::get_next_record(int zone_id, int& index, - std::string& sql, std::string& zedp) +int FedReplicaManager::get_next_record(int zone_id, std::string& zedp, + LogDBRecord& lr) { pthread_mutex_lock(&mutex); @@ -388,114 +286,28 @@ int FedReplicaManager::get_next_record(int zone_id, int& index, return -1; } - index = it->second->next; - zedp = it->second->endpoint; + ZoneServers * zs = it->second; - int rc = get_log_record(index, sql); + zedp = zs->endpoint; + + if ( zs->next == -1 ) + { + zs->next= logdb->last_federated(); + } + + if ( zs->last == zs->next ) + { + pthread_mutex_unlock(&mutex); + return -1; + } + + int rc = 
logdb->get_log_record(zs->next, lr); pthread_mutex_unlock(&mutex); return rc; } -/* -------------------------------------------------------------------------- */ - -int FedReplicaManager::get_log_record(int index, std::string& sql) -{ - std::string zsql; - - ostringstream oss; - - single_cb cb; - - oss << "SELECT sqlcmd FROM fed_logdb WHERE log_index = " << index; - - cb.set_callback(&zsql); - - int rc = logdb->exec_rd(oss, &cb); - - cb.unset_callback(); - - std::string * _sql = one_util::zlib_decompress(zsql, true); - - if ( _sql == 0 ) - { - return -1; - } - - sql = *_sql; - - delete _sql; - - return rc; -} - -/* -------------------------------------------------------------------------- */ - -int FedReplicaManager::insert_log_record(int index, const std::string& sql) -{ - std::ostringstream oss; - - std::string * zsql = one_util::zlib_compress(sql, true); - - if ( zsql == 0 ) - { - return -1; - } - - char * sql_db = logdb->escape_str(zsql->c_str()); - - delete zsql; - - if ( sql_db == 0 ) - { - return -1; - } - - oss << "REPLACE INTO " << table << " ("<< db_names <<") VALUES " - << "(" << index << ",'" << sql_db << "')"; - - return logdb->exec_wr(oss); -} - -/* -------------------------------------------------------------------------- */ - -int FedReplicaManager::get_last_index(unsigned int& index) const -{ - ostringstream oss; - - single_cb cb; - - oss << "SELECT MAX(log_index) FROM fed_logdb"; - - cb.set_callback(&index); - - int rc = logdb->exec_rd(oss, &cb); - - cb.unset_callback(); - - return rc; -} - -/* -------------------------------------------------------------------------- */ - -int FedReplicaManager::bootstrap(SqlDB *_db) -{ - int rc; - - std::ostringstream oss(db_bootstrap); - - rc = _db->exec_local_wr(oss); - - oss.str(""); - - oss << "REPLACE INTO " << table << " ("<< db_names <<") VALUES (-1,-1)"; - - rc += _db->exec_local_wr(oss); - - return rc; -} - /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -513,9 +325,11 @@ void FedReplicaManager::replicate_success(int zone_id) ZoneServers * zs = it->second; - zs->next++; + zs->last = zs->next; - if ( last_index >= zs->next ) + zs->next = logdb->next_federated(zs->next); + + if ( zs->next != -1 ) { ReplicaManager::replicate(zone_id); } @@ -537,10 +351,12 @@ void FedReplicaManager::replicate_failure(int zone_id, int last_zone) if ( last_zone >= 0 ) { - zs->next = last_zone + 1; + zs->last = last_zone; + + zs->next = logdb->next_federated(zs->last); } - if ( last_index >= zs->next ) + if ( zs->next != -1 ) { ReplicaManager::replicate(zone_id); } @@ -558,18 +374,20 @@ int FedReplicaManager::xmlrpc_replicate_log(int zone_id, bool& success, { static const std::string replica_method = "one.zone.fedreplicate"; - int index; - - std::string sql, secret, zedp; + std::string secret, zedp; int xml_rc = 0; - if ( get_next_record(zone_id, index, sql, zedp) != 0 ) + LogDBRecord lr; + + if ( get_next_record(zone_id, zedp, lr) != 0 ) { error = "Failed to load federation log record"; return -1; } + int prev_index = logdb->previous_federated(lr.index); + // ------------------------------------------------------------------------- // Get parameters to call append entries on follower // ------------------------------------------------------------------------- @@ -582,8 +400,9 @@ int FedReplicaManager::xmlrpc_replicate_log(int zone_id, bool& success, xmlrpc_c::paramList replica_params; replica_params.add(xmlrpc_c::value_string(secret)); 
- replica_params.add(xmlrpc_c::value_int(index)); - replica_params.add(xmlrpc_c::value_string(sql)); + replica_params.add(xmlrpc_c::value_int(lr.index)); + replica_params.add(xmlrpc_c::value_int(prev_index)); + replica_params.add(xmlrpc_c::value_string(lr.sql)); // ------------------------------------------------------------------------- // Do the XML-RPC call @@ -612,7 +431,7 @@ int FedReplicaManager::xmlrpc_replicate_log(int zone_id, bool& success, { std::ostringstream ess; - ess << "Error replicating log entry " << index << " on zone " + ess << "Error replicating log entry " << lr.index << " on zone " << zone_id << " (" << zedp << "): " << error; NebulaLog::log("FRM", Log::ERROR, error); diff --git a/src/raft/RaftManager.cc b/src/raft/RaftManager.cc index fffa78b4e9..f268c1dd31 100644 --- a/src/raft/RaftManager.cc +++ b/src/raft/RaftManager.cc @@ -74,7 +74,7 @@ RaftManager::RaftManager(int id, const VectorAttribute * leader_hook_mad, bsr << "bootstrap state"; - logdb->insert_log_record(-1, -1, bsr, 0); + logdb->insert_log_record(-1, -1, bsr, 0, -1); raft_state.replace("TERM", 0); raft_state.replace("VOTEDFOR", -1); @@ -1038,6 +1038,7 @@ int RaftManager::xmlrpc_replicate_log(int follower_id, LogDBRecord * lr, replica_params.add(xmlrpc_c::value_int(lr->term)); replica_params.add(xmlrpc_c::value_int(lr->prev_index)); replica_params.add(xmlrpc_c::value_int(lr->prev_term)); + replica_params.add(xmlrpc_c::value_int(lr->fed_index)); replica_params.add(xmlrpc_c::value_string(lr->sql)); // ------------------------------------------------------------------------- @@ -1176,8 +1177,6 @@ std::string& RaftManager::to_xml(std::string& raft_xml) Nebula& nd = Nebula::instance(); LogDB * logdb = nd.get_logdb(); - FedReplicaManager * frm = nd.get_frm(); - unsigned int lindex, lterm; std::ostringstream oss; @@ -1206,7 +1205,7 @@ std::string& RaftManager::to_xml(std::string& raft_xml) if ( nd.is_federation_enabled() ) { - oss << "" << frm->get_last_index() << ""; + oss << "" << logdb->last_federated() << ""; } else { diff --git a/src/raft/ReplicaThread.cc b/src/raft/ReplicaThread.cc index b3ec8049e5..b4084cd6fa 100644 --- a/src/raft/ReplicaThread.cc +++ b/src/raft/ReplicaThread.cc @@ -304,6 +304,7 @@ int HeartBeatThread::replicate() lr.sql = ""; lr.timestamp = 0; + lr.fed_index = -1; rc = raftm->xmlrpc_replicate_log(follower_id, &lr, success, fterm, error); diff --git a/src/rm/RequestManagerZone.cc b/src/rm/RequestManagerZone.cc index e5d885627c..8c054dfa89 100644 --- a/src/rm/RequestManagerZone.cc +++ b/src/rm/RequestManagerZone.cc @@ -273,8 +273,9 @@ void ZoneReplicateLog::request_execute(xmlrpc_c::paramList const& paramList, unsigned int term = xmlrpc_c::value_int(paramList.getInt(5)); unsigned int prev_index = xmlrpc_c::value_int(paramList.getInt(6)); unsigned int prev_term = xmlrpc_c::value_int(paramList.getInt(7)); + unsigned int fed_index = xmlrpc_c::value_int(paramList.getInt(8)); - string sql = xmlrpc_c::value_string(paramList.getString(8)); + string sql = xmlrpc_c::value_string(paramList.getString(9)); unsigned int current_term = raftm->get_term(); @@ -392,7 +393,7 @@ void ZoneReplicateLog::request_execute(xmlrpc_c::paramList const& paramList, ostringstream sql_oss(sql); - if ( logdb->insert_log_record(index, term, sql_oss, 0) != 0 ) + if ( logdb->insert_log_record(index, term, sql_oss, 0, fed_index) != 0 ) { att.resp_msg = "Error writing log record"; att.resp_id = current_term; @@ -518,7 +519,8 @@ void ZoneReplicateFedLog::request_execute(xmlrpc_c::paramList const& paramList, FedReplicaManager * 
frm = nd.get_frm(); int index = xmlrpc_c::value_int(paramList.getInt(1)); - string sql = xmlrpc_c::value_string(paramList.getString(2)); + int prev = xmlrpc_c::value_int(paramList.getInt(2)); + string sql = xmlrpc_c::value_string(paramList.getString(3)); if ( att.uid != 0 ) { @@ -554,7 +556,7 @@ void ZoneReplicateFedLog::request_execute(xmlrpc_c::paramList const& paramList, return; } - int rc = frm->apply_log_record(index, sql); + int rc = frm->apply_log_record(index, prev, sql); if ( rc == 0 ) { diff --git a/src/sql/LogDB.cc b/src/sql/LogDB.cc index 99895f09c9..13e474c079 100644 --- a/src/sql/LogDB.cc +++ b/src/sql/LogDB.cc @@ -25,11 +25,11 @@ const char * LogDB::table = "logdb"; -const char * LogDB::db_names = "log_index, term, sqlcmd, timestamp"; +const char * LogDB::db_names = "log_index, term, sqlcmd, timestamp, fed_index"; const char * LogDB::db_bootstrap = "CREATE TABLE IF NOT EXISTS " "logdb (log_index INTEGER PRIMARY KEY, term INTEGER, sqlcmd MEDIUMTEXT, " - "timestamp INTEGER)"; + "timestamp INTEGER, fed_index INTEGER)"; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -37,7 +37,7 @@ const char * LogDB::db_bootstrap = "CREATE TABLE IF NOT EXISTS " int LogDBRecord::select_cb(void *nil, int num, char **values, char **names) { if ( !values || !values[0] || !values[1] || !values[2] || !values[3] || - !values[4] || !values[5] || num != 6 ) + !values[4] || !values[5] || !values[6] || num != 7 ) { return -1; } @@ -52,8 +52,10 @@ int LogDBRecord::select_cb(void *nil, int num, char **values, char **names) timestamp = static_cast(atoi(values[3])); - prev_index = static_cast(atoi(values[4])); - prev_term = static_cast(atoi(values[5])); + fed_index = static_cast(atoi(values[4])); + + prev_index = static_cast(atoi(values[5])); + prev_term = static_cast(atoi(values[6])); _sql = one_util::zlib_decompress(zsql, true); @@ -88,7 +90,7 @@ LogDB::LogDB(SqlDB * _db, bool _solo, unsigned int _lret):solo(_solo), db(_db), oss << time(0); - insert_log_record(0, 0, oss, time(0)); + insert_log_record(0, 0, oss, time(0), false); } setup_index(r, i); @@ -153,6 +155,8 @@ int LogDB::setup_index(int& _last_applied, int& _last_index) last_term = lr.term; } + build_federated_index(); + pthread_mutex_unlock(&mutex); return rc; @@ -175,7 +179,7 @@ int LogDB::get_log_record(unsigned int index, LogDBRecord& lr) lr.index = index + 1; oss << "SELECT c.log_index, c.term, c.sqlcmd," - << " c.timestamp, p.log_index, p.term" + << " c.timestamp, c.fed_index, p.log_index, p.term" << " FROM logdb c, logdb p WHERE c.log_index = " << index << " AND p.log_index = " << prev_index; @@ -255,7 +259,8 @@ int LogDB::update_raft_state(std::string& raft_xml) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int LogDB::insert(int index, int term, const std::string& sql, time_t tstamp) +int LogDB::insert(int index, int term, const std::string& sql, time_t tstamp, + int fed_index) { std::ostringstream oss; @@ -278,7 +283,8 @@ int LogDB::insert(int index, int term, const std::string& sql, time_t tstamp) } oss << "INSERT INTO " << table << " ("<< db_names <<") VALUES (" - << index << "," << term << "," << "'" << sql_db << "'," << tstamp<< ")"; + << index << "," << term << "," << "'" << sql_db << "'," << tstamp + << "," << fed_index << ")"; int rc = db->exec_wr(oss); @@ -336,13 +342,24 @@ int 
LogDB::apply_log_record(LogDBRecord * lr) /* -------------------------------------------------------------------------- */ int LogDB::insert_log_record(unsigned int term, std::ostringstream& sql, - time_t timestamp) + time_t timestamp, int fed_index) { pthread_mutex_lock(&mutex); unsigned int index = next_index; - if ( insert(index, term, sql.str(), timestamp) != 0 ) + int _fed_index; + + if ( fed_index == 0 ) + { + _fed_index = index; + } + else + { + _fed_index = fed_index; + } + + if ( insert(index, term, sql.str(), timestamp, _fed_index) != 0 ) { NebulaLog::log("DBM", Log::ERROR, "Cannot insert log record in DB"); @@ -357,6 +374,11 @@ int LogDB::insert_log_record(unsigned int term, std::ostringstream& sql, next_index++; + if ( fed_index != -1 ) + { + fed_log.insert(_fed_index); + } + pthread_mutex_unlock(&mutex); return index; @@ -366,13 +388,13 @@ int LogDB::insert_log_record(unsigned int term, std::ostringstream& sql, /* -------------------------------------------------------------------------- */ int LogDB::insert_log_record(unsigned int index, unsigned int term, - std::ostringstream& sql, time_t timestamp) + std::ostringstream& sql, time_t timestamp, int fed_index) { int rc; pthread_mutex_lock(&mutex); - rc = insert(index, term, sql.str(), timestamp); + rc = insert(index, term, sql.str(), timestamp, fed_index); if ( rc == 0 ) { @@ -384,6 +406,11 @@ int LogDB::insert_log_record(unsigned int index, unsigned int term, next_index = last_index + 1; } + + if ( fed_index != -1 ) + { + fed_log.insert(fed_index); + } } pthread_mutex_unlock(&mutex); @@ -394,7 +421,7 @@ int LogDB::insert_log_record(unsigned int index, unsigned int term, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int LogDB::exec_wr(ostringstream& cmd) +int LogDB::_exec_wr(ostringstream& cmd, int federated_index) { int rc; @@ -416,7 +443,7 @@ int LogDB::exec_wr(ostringstream& cmd) // ------------------------------------------------------------------------- // Insert log entry in the database and replicate on followers // ------------------------------------------------------------------------- - int rindex = insert_log_record(raftm->get_term(), cmd, 0); + int rindex = insert_log_record(raftm->get_term(), cmd, 0, federated_index); if ( rindex == -1 ) { @@ -544,6 +571,102 @@ int LogDB::purge_log() return rc; } +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ +int LogDB::index_cb(void *null, int num, char **values, char **names) +{ + if ( num == 0 || values == 0 || values[0] == 0 ) + { + return -1; + } + + fed_log.insert(atoi(values[0])); + + return 0; +} + +void LogDB::build_federated_index() +{ + std::ostringstream oss; + + fed_log.clear(); + + set_callback(static_cast(&LogDB::index_cb), 0); + + oss << "SELECT fed_index FROM " << table << " WHERE fed_index != -1 "; + + db->exec_rd(oss, this); + + unset_callback(); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int LogDB::last_federated() +{ + pthread_mutex_lock(&mutex); + + int findex = -1; + + if ( !fed_log.empty() ) + { + set::reverse_iterator rit; + + rit = fed_log.rbegin(); + + findex = *rit; + } + + pthread_mutex_unlock(&mutex); + + return findex; +} + +/* 
-------------------------------------------------------------------------- */ + +int LogDB::previous_federated(int i) +{ + set::iterator it; + + pthread_mutex_lock(&mutex); + + int findex = -1; + + it = fed_log.find(i); + + if ( it != fed_log.end() && it != fed_log.begin() ) + { + findex = *(--it); + } + + pthread_mutex_unlock(&mutex); + + return findex; +} + +/* -------------------------------------------------------------------------- */ + +int LogDB::next_federated(int i) +{ + set::iterator it; + + pthread_mutex_lock(&mutex); + + int findex = -1; + + it = fed_log.find(i); + + if ( it != fed_log.end() && it != --fed_log.end() ) + { + findex = *(++it); + } + + pthread_mutex_unlock(&mutex); + + return findex; +} + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -551,7 +674,7 @@ int FedLogDB::exec_wr(ostringstream& cmd) { FedReplicaManager * frm = Nebula::instance().get_frm(); - int rc = _logdb->exec_wr(cmd); + int rc = _logdb->exec_federated_wr(cmd); if ( rc != 0 ) { From 215bc0dff76d55af11ce7b8d3db68edc27b6b33f Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Fri, 30 Jun 2017 11:48:23 +0200 Subject: [PATCH 07/21] F #4809: Enable federated of solo Zones --- src/sql/LogDB.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/sql/LogDB.cc b/src/sql/LogDB.cc index 13e474c079..130463dfbb 100644 --- a/src/sql/LogDB.cc +++ b/src/sql/LogDB.cc @@ -90,7 +90,7 @@ LogDB::LogDB(SqlDB * _db, bool _solo, unsigned int _lret):solo(_solo), db(_db), oss << time(0); - insert_log_record(0, 0, oss, time(0), false); + insert_log_record(0, 0, oss, time(0), -1); } setup_index(r, i); @@ -432,7 +432,14 @@ int LogDB::_exec_wr(ostringstream& cmd, int federated_index) // ------------------------------------------------------------------------- if ( solo ) { - return db->exec_wr(cmd); + rc = db->exec_wr(cmd); + + if ( rc == 0 && Nebula::instance().is_federation_enabled() ) + { + insert_log_record(0, cmd, time(0), federated_index); + } + + return rc; } else if ( raftm == 0 || !raftm->is_leader() ) { From 7b22d8755b37cdab4efee76a51199866d8f56ed5 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Fri, 30 Jun 2017 11:49:04 +0200 Subject: [PATCH 08/21] F #4809: Log information to debug federated zones with HA clusters. 
THIS COMMIT IS MEANT TO BE REVERTED --- src/raft/FedReplicaManager.cc | 35 ++++++++++++++++++++++++++++++++++- src/rm/RequestManagerZone.cc | 7 +++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/src/raft/FedReplicaManager.cc b/src/raft/FedReplicaManager.cc index ecb7f7f170..b2d5f7bbf5 100644 --- a/src/raft/FedReplicaManager.cc +++ b/src/raft/FedReplicaManager.cc @@ -290,11 +290,23 @@ int FedReplicaManager::get_next_record(int zone_id, std::string& zedp, zedp = zs->endpoint; +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + std::ostringstream loss; + loss << "REPLICATE FOR ZONE: " << zone_id << " NEXT IS: " << zs->next + << "LAST IS: " << zs->last; + NebulaLog::log("ReM", Log::INFO, loss); +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->next == -1 ) { zs->next= logdb->last_federated(); } +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + loss.str(""); + loss << "REPLICATE FOR ZONE: " << zone_id << " NEXT IS: " << zs->next + << "LAST IS: " << zs->last; + NebulaLog::log("ReM", Log::INFO, loss); +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->last == zs->next ) { pthread_mutex_unlock(&mutex); @@ -325,10 +337,19 @@ void FedReplicaManager::replicate_success(int zone_id) ZoneServers * zs = it->second; +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + std::ostringstream loss; + loss << "SUCCESS REPLICATE FOR ZONE: " << zone_id << " NEXT: " << zs->next + << " LAST: " << zs->last; +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL zs->last = zs->next; zs->next = logdb->next_federated(zs->next); - +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + loss.str(""); + loss << " NEW NEXT: " << zs->next << " NEW LAST: " << zs->last; + NebulaLog::log("ReM", Log::INFO, loss); +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->next != -1 ) { ReplicaManager::replicate(zone_id); @@ -356,6 +377,12 @@ void FedReplicaManager::replicate_failure(int zone_id, int last_zone) zs->next = logdb->next_federated(zs->last); } +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + std::ostringstream loss; + loss << "FAILURE REPLICATE FOR ZONE: " << zone_id << " NEXT: " << zs->next + << "LAST: " << zs->last << " LAST_ZONE: " << last_zone; + NebulaLog::log("ReM", Log::INFO, loss); +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->next != -1 ) { ReplicaManager::replicate(zone_id); @@ -404,6 +431,12 @@ int FedReplicaManager::xmlrpc_replicate_log(int zone_id, bool& success, replica_params.add(xmlrpc_c::value_int(prev_index)); replica_params.add(xmlrpc_c::value_string(lr.sql)); +//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + std::ostringstream loss; + loss << "REPLICATING INDEX: " < Date: Fri, 30 Jun 2017 12:13:39 +0200 Subject: [PATCH 09/21] Updated restricted attributes --- share/etc/oned.conf | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/share/etc/oned.conf b/share/etc/oned.conf index be16a23ca8..bf71b55699 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -897,12 +897,12 @@ VM_RESTRICTED_ATTR = "CONTEXT/FILES" VM_RESTRICTED_ATTR = "NIC/MAC" VM_RESTRICTED_ATTR = "NIC/VLAN_ID" VM_RESTRICTED_ATTR = "NIC/BRIDGE" -VM_RESTRICTED_ATTR = "NIC/INBOUND_AVG_BW" 
-VM_RESTRICTED_ATTR = "NIC/INBOUND_PEAK_BW" -VM_RESTRICTED_ATTR = "NIC/INBOUND_PEAK_KB" -VM_RESTRICTED_ATTR = "NIC/OUTBOUND_AVG_BW" -VM_RESTRICTED_ATTR = "NIC/OUTBOUND_PEAK_BW" -VM_RESTRICTED_ATTR = "NIC/OUTBOUND_PEAK_KB" +#VM_RESTRICTED_ATTR = "NIC/INBOUND_AVG_BW" +#VM_RESTRICTED_ATTR = "NIC/INBOUND_PEAK_BW" +#VM_RESTRICTED_ATTR = "NIC/INBOUND_PEAK_KB" +#VM_RESTRICTED_ATTR = "NIC/OUTBOUND_AVG_BW" +#VM_RESTRICTED_ATTR = "NIC/OUTBOUND_PEAK_BW" +#VM_RESTRICTED_ATTR = "NIC/OUTBOUND_PEAK_KB" #VM_RESTRICTED_ATTR = "NIC/OPENNEBULA_MANAGED" #VM_RESTRICTED_ATTR = "NIC/VCENTER_INSTANCE_ID" #VM_RESTRICTED_ATTR = "NIC/VCENTER_NET_REF" @@ -910,12 +910,12 @@ VM_RESTRICTED_ATTR = "NIC/OUTBOUND_PEAK_KB" VM_RESTRICTED_ATTR = "NIC_DEFAULT/MAC" VM_RESTRICTED_ATTR = "NIC_DEFAULT/VLAN_ID" VM_RESTRICTED_ATTR = "NIC_DEFAULT/BRIDGE" -VM_RESTRICTED_ATTR = "DISK/TOTAL_BYTES_SEC" -VM_RESTRICTED_ATTR = "DISK/READ_BYTES_SEC" -VM_RESTRICTED_ATTR = "DISK/WRITE_BYTES_SEC" -VM_RESTRICTED_ATTR = "DISK/TOTAL_IOPS_SEC" -VM_RESTRICTED_ATTR = "DISK/READ_IOPS_SEC" -VM_RESTRICTED_ATTR = "DISK/WRITE_IOPS_SEC" +#VM_RESTRICTED_ATTR = "DISK/TOTAL_BYTES_SEC" +#VM_RESTRICTED_ATTR = "DISK/READ_BYTES_SEC" +#VM_RESTRICTED_ATTR = "DISK/WRITE_BYTES_SEC" +#VM_RESTRICTED_ATTR = "DISK/TOTAL_IOPS_SEC" +#VM_RESTRICTED_ATTR = "DISK/READ_IOPS_SEC" +#VM_RESTRICTED_ATTR = "DISK/WRITE_IOPS_SEC" #VM_RESTRICTED_ATTR = "DISK/OPENNEBULA_MANAGED" #VM_RESTRICTED_ATTR = "DISK/VCENTER_DS_REF" #VM_RESTRICTED_ATTR = "DISK/VCENTER_INSTANCE_ID" @@ -927,9 +927,9 @@ VM_RESTRICTED_ATTR = "MEMORY_COST" VM_RESTRICTED_ATTR = "DISK_COST" VM_RESTRICTED_ATTR = "PCI" VM_RESTRICTED_ATTR = "EMULATOR" -VM_RESTRICTED_ATTR = "USER_INPUTS/CPU" -VM_RESTRICTED_ATTR = "USER_INPUTS/MEMORY" -VM_RESTRICTED_ATTR = "USER_INPUTS/VCPU" +#VM_RESTRICTED_ATTR = "USER_INPUTS/CPU" +#VM_RESTRICTED_ATTR = "USER_INPUTS/MEMORY" +#VM_RESTRICTED_ATTR = "USER_INPUTS/VCPU" #VM_RESTRICTED_ATTR = "TEMPLATE/VCENTER_VM_FOLDER" #VM_RESTRICTED_ATTR = "RANK" From a90a4dc252bf21895db33ec894810895bfe2b883 Mon Sep 17 00:00:00 2001 From: Abel Coronado Date: Fri, 30 Jun 2017 12:49:55 +0200 Subject: [PATCH 10/21] disk_resize in RUNNING kvm VMs (#366) * Added VM.resize to RUNNING state * disk_resize in RUNNING kvm VMs * Changed resize for disk_resize --- src/sunstone/public/app/tabs/vms-tab/panels/storage.js | 10 ++++++---- .../public/app/tabs/vms-tab/utils/state-actions.js | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/sunstone/public/app/tabs/vms-tab/panels/storage.js b/src/sunstone/public/app/tabs/vms-tab/panels/storage.js index 50af8ee91a..f4e5b96694 100644 --- a/src/sunstone/public/app/tabs/vms-tab/panels/storage.js +++ b/src/sunstone/public/app/tabs/vms-tab/panels/storage.js @@ -311,10 +311,12 @@ define(function(require) { } } - if (Config.isTabActionEnabled("vms-tab", "VM.resize")) { - if (StateActions.enabledStateAction("VM.resize", that.element.STATE, that.element.LCM_STATE)) { - actions += ('\ - '); + if (Config.isTabActionEnabled("vms-tab", "VM.disk_resize")) { + if (StateActions.enabledStateAction("VM.disk_resize", that.element.STATE, that.element.LCM_STATE)) { + if(that.element.LCM_STATE != "3" || that.element.HISTORY_RECORDS.HISTORY.VM_MAD != "vcenter"){ + actions += ('\ + '); + } } } } diff --git a/src/sunstone/public/app/tabs/vms-tab/utils/state-actions.js b/src/sunstone/public/app/tabs/vms-tab/utils/state-actions.js index c902559fac..a82045f623 100644 --- a/src/sunstone/public/app/tabs/vms-tab/utils/state-actions.js +++ 
b/src/sunstone/public/app/tabs/vms-tab/utils/state-actions.js @@ -44,7 +44,7 @@ define(function(require) { []; STATE_ACTIONS[OpenNebulaVM.STATES.POWEROFF] = - ["VM.resume", "VM.resize", "VM.attachdisk", "VM.detachdisk", "VM.attachnic", "VM.detachnic", "VM.disk_saveas", "VM.disk_snapshot_create", "VM.disk_snapshot_revert", "VM.disk_snapshot_delete", "VM.migrate", "VM.undeploy", "VM.undeploy_hard", "VM.save_as_template", "VM.updateconf", "VM.terminate_hard", "VM.recover"]; + ["VM.resume", "VM.resize", "VM.attachdisk", "VM.detachdisk", "VM.attachnic", "VM.detachnic", "VM.disk_saveas", "VM.disk_snapshot_create", "VM.disk_snapshot_revert", "VM.disk_snapshot_delete", "VM.migrate", "VM.undeploy", "VM.undeploy_hard", "VM.save_as_template", "VM.updateconf", "VM.terminate_hard", "VM.recover", "VM.disk_resize"]; STATE_ACTIONS[OpenNebulaVM.STATES.UNDEPLOYED] = ["VM.resume", "VM.resize", "VM.deploy", "VM.updateconf", "VM.terminate_hard", "VM.recover"]; @@ -60,7 +60,7 @@ define(function(require) { LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.PROLOG ] = ["VM.updateconf"]; LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.BOOT ] = []; LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.RUNNING ] = - ["VM.stop", "VM.suspend", "VM.reboot", "VM.reboot_hard", "VM.resched", "VM.unresched", "VM.poweroff", "VM.poweroff_hard", "VM.undeploy", "VM.undeploy_hard", "VM.migrate", "VM.migrate_live", "VM.attachdisk", "VM.detachdisk", "VM.attachnic", "VM.detachnic", "VM.disk_saveas", "VM.disk_snapshot_create", "VM.disk_snapshot_delete", "VM.terminate", "VM.terminate_hard"]; + ["VM.stop", "VM.suspend", "VM.reboot", "VM.reboot_hard", "VM.resched", "VM.unresched", "VM.poweroff", "VM.poweroff_hard", "VM.undeploy", "VM.undeploy_hard", "VM.migrate", "VM.migrate_live", "VM.attachdisk", "VM.detachdisk", "VM.attachnic", "VM.detachnic", "VM.disk_saveas", "VM.disk_snapshot_create", "VM.disk_snapshot_delete", "VM.terminate", "VM.terminate_hard", "VM.disk_resize"]; LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.MIGRATE ] = []; LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.SAVE_STOP ] = []; LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.SAVE_SUSPEND ] = []; From 25b48b1ec45776ee6d78fcc6adfc9da7a5bab472 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Fri, 30 Jun 2017 14:56:27 +0200 Subject: [PATCH 11/21] F #4809: Update onedb backup federated backup utility --- src/onedb/onedb_backend.rb | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/onedb/onedb_backend.rb b/src/onedb/onedb_backend.rb index 1713d42255..8e16dd9401 100644 --- a/src/onedb/onedb_backend.rb +++ b/src/onedb/onedb_backend.rb @@ -30,7 +30,7 @@ end class OneDBBacKEnd FEDERATED_TABLES = %w(group_pool user_pool acl zone_pool vdc_pool - marketplace_pool marketplaceapp_pool fed_logdb) + marketplace_pool marketplaceapp_pool) def read_db_version connect_db @@ -262,6 +262,18 @@ class BackEndMySQL < OneDBBacKEnd raise "Unknown error running '#{cmd}'" end + if federated + cmd = "mysqldump -u #{@user} -p'#{@passwd}' -h #{@server} " << + "-P #{@port} #{@db_name} logdb --where=\"fed_index!=-1\" "<< + " >> #{bck_file}" + + rc = system(cmd) + + if !rc + raise "Unknown error running '#{cmd}'" + end + end + puts "MySQL dump stored in #{bck_file}" puts "Use 'onedb restore' or restore the DB using the mysql command:" puts "mysql -u user -h server -P port db_name < backup_file" From 3378c9a2f010e79a6e66eca0f1178681f9b79e0e Mon Sep 17 00:00:00 2001 From: "Ruben S. 
Montero" Date: Fri, 30 Jun 2017 16:08:11 +0200 Subject: [PATCH 12/21] Revert "F #4809: Log information to debug federated zones with HA clusters. THIS" This reverts commit fab2a07f74f55528631fa5b6159e80c1fa884637. --- src/raft/FedReplicaManager.cc | 35 +---------------------------------- src/rm/RequestManagerZone.cc | 7 ------- 2 files changed, 1 insertion(+), 41 deletions(-) diff --git a/src/raft/FedReplicaManager.cc b/src/raft/FedReplicaManager.cc index b2d5f7bbf5..ecb7f7f170 100644 --- a/src/raft/FedReplicaManager.cc +++ b/src/raft/FedReplicaManager.cc @@ -290,23 +290,11 @@ int FedReplicaManager::get_next_record(int zone_id, std::string& zedp, zedp = zs->endpoint; -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL - std::ostringstream loss; - loss << "REPLICATE FOR ZONE: " << zone_id << " NEXT IS: " << zs->next - << "LAST IS: " << zs->last; - NebulaLog::log("ReM", Log::INFO, loss); -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->next == -1 ) { zs->next= logdb->last_federated(); } -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL - loss.str(""); - loss << "REPLICATE FOR ZONE: " << zone_id << " NEXT IS: " << zs->next - << "LAST IS: " << zs->last; - NebulaLog::log("ReM", Log::INFO, loss); -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->last == zs->next ) { pthread_mutex_unlock(&mutex); @@ -337,19 +325,10 @@ void FedReplicaManager::replicate_success(int zone_id) ZoneServers * zs = it->second; -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL - std::ostringstream loss; - loss << "SUCCESS REPLICATE FOR ZONE: " << zone_id << " NEXT: " << zs->next - << " LAST: " << zs->last; -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL zs->last = zs->next; zs->next = logdb->next_federated(zs->next); -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL - loss.str(""); - loss << " NEW NEXT: " << zs->next << " NEW LAST: " << zs->last; - NebulaLog::log("ReM", Log::INFO, loss); -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL + if ( zs->next != -1 ) { ReplicaManager::replicate(zone_id); @@ -377,12 +356,6 @@ void FedReplicaManager::replicate_failure(int zone_id, int last_zone) zs->next = logdb->next_federated(zs->last); } -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL - std::ostringstream loss; - loss << "FAILURE REPLICATE FOR ZONE: " << zone_id << " NEXT: " << zs->next - << "LAST: " << zs->last << " LAST_ZONE: " << last_zone; - NebulaLog::log("ReM", Log::INFO, loss); -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL if ( zs->next != -1 ) { ReplicaManager::replicate(zone_id); @@ -431,12 +404,6 @@ int FedReplicaManager::xmlrpc_replicate_log(int zone_id, bool& success, replica_params.add(xmlrpc_c::value_int(prev_index)); replica_params.add(xmlrpc_c::value_string(lr.sql)); -//LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL - std::ostringstream loss; - loss << "REPLICATING INDEX: " < Date: Fri, 30 Jun 2017 17:02:59 +0200 Subject: [PATCH 13/21] F #4809: Pre-allocate lastoid to prevent stale id's in the pool in case of leader failure --- src/pool/PoolSQL.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/pool/PoolSQL.cc b/src/pool/PoolSQL.cc index 8730e64f6b..74f2db2451 100644 --- a/src/pool/PoolSQL.cc +++ 
b/src/pool/PoolSQL.cc @@ -137,6 +137,8 @@ int PoolSQL::allocate(PoolObjectSQL *objsql, string& error_str) objsql->oid = ++lastOID; + _set_lastOID(lastOID, db, table); + rc = objsql->insert(db, error_str); if ( rc != 0 ) @@ -151,9 +153,9 @@ int PoolSQL::allocate(PoolObjectSQL *objsql, string& error_str) delete objsql; - if( rc != -1 ) + if( rc == -1 ) { - _set_lastOID(lastOID, db, table); + _set_lastOID(--lastOID, db, table); } unlock(); From d923ef3603b763764a8d5f3dbafd171bbc073171 Mon Sep 17 00:00:00 2001 From: mcabrerizo Date: Fri, 30 Jun 2017 18:20:00 +0200 Subject: [PATCH 14/21] vCenter import templates and wild vms where two or more nics share the same port group --- .../public/app/tabs/hosts-tab/panels/wilds.js | 9 ++++- .../public/app/utils/vcenter/templates.js | 39 ++++++++++++++++++- .../lib/vcenter_driver/virtual_machine.rb | 26 ++++++++++--- 3 files changed, 64 insertions(+), 10 deletions(-) diff --git a/src/sunstone/public/app/tabs/hosts-tab/panels/wilds.js b/src/sunstone/public/app/tabs/hosts-tab/panels/wilds.js index a393b09ecf..6ffdbe1ff7 100644 --- a/src/sunstone/public/app/tabs/hosts-tab/panels/wilds.js +++ b/src/sunstone/public/app/tabs/hosts-tab/panels/wilds.js @@ -129,6 +129,7 @@ define(function(require) { var index = 0; var template = ""; var rollback = []; + var duplicated_nics = {}; function getNext() { @@ -200,7 +201,6 @@ define(function(require) { } if (disks_and_nets[index].type === "EXISTING_DISK") { - template += disks_and_nets[index].image_tmpl; ++index; getNext(); } @@ -222,6 +222,8 @@ define(function(require) { var network_id = response.VNET.ID; if (one_cluster_id != -1) { Sunstone.runAction("Cluster.addvnet",one_cluster_id,response.VNET.ID); + // Remove vnet from cluster default 0 + Sunstone.runAction("Cluster.delvnet",0,response.VNET.ID); } ++index; var rollback_info = { type: "NETWORK", id: network_id}; @@ -241,11 +243,14 @@ define(function(require) { } if (disks_and_nets[index].type == "EXISTING_NIC") { - template += disks_and_nets[index].network_tmpl; ++index; getNext(); } + if (disks_and_nets[index].type === "DUPLICATED_NIC") { + ++index; + getNext(); + } } } getNext(); diff --git a/src/sunstone/public/app/utils/vcenter/templates.js b/src/sunstone/public/app/utils/vcenter/templates.js index a8e90f0796..b3eded515e 100644 --- a/src/sunstone/public/app/utils/vcenter/templates.js +++ b/src/sunstone/public/app/utils/vcenter/templates.js @@ -245,6 +245,7 @@ define(function(require) { var index = 0; var template = ""; var rollback = []; + var duplicated_nics = {}; function getNext() { @@ -263,7 +264,7 @@ define(function(require) { } else { - if (disks_and_nets[index].type === "NEW_DISK") { + if (disks_and_nets[index].type === "NEW_DISK") { var image_json = { "image": { @@ -281,7 +282,6 @@ define(function(require) { ++index; template += "DISK=[\n"; template += "IMAGE_ID=\"" + image_id + "\",\n"; - template += "IMAGE_UNAME=\"" + image_uname + "\",\n"; template += "OPENNEBULA_MANAGED=\"NO\"\n"; template += "]\n"; @@ -326,12 +326,17 @@ define(function(require) { var network_id = response.VNET.ID; if (one_cluster_id != -1) { Sunstone.runAction("Cluster.addvnet",one_cluster_id,response.VNET.ID); + //Remove bnet from default datastore + Sunstone.runAction("Cluster.delvnet",0,response.VNET.ID); } + duplicated_nics[disks_and_nets[index].network_name]=network_id; + ++index; template += "NIC=[\n"; template += "NETWORK_ID=\"" + network_id + "\",\n"; template += "OPENNEBULA_MANAGED=\"NO\"\n"; template += "]\n"; + var rollback_info = { type: "NETWORK", id: network_id}; 
rollback.push(rollback_info); getNext(); @@ -354,6 +359,18 @@ define(function(require) { getNext(); } + if (disks_and_nets[index].type === "DUPLICATED_NIC") { + var network_id = duplicated_nics[disks_and_nets[index].network_name]; + + template += "NIC=[\n"; + template += "NETWORK_ID=\"" + network_id + "\",\n"; + template += "OPENNEBULA_MANAGED=\"NO\"\n"; + template += "]\n"; + ++index; + getNext(); + } + + } } getNext(); @@ -445,6 +462,24 @@ define(function(require) { message : OpenNebulaError(response).error.message }); Notifier.onError({}, OpenNebulaError(response)); + + // Remove template - Rollback + var path = '/vcenter/template_rollback/' + template_id; + $.ajax({ + url: path, + type: "POST", + data: {timeout: false}, + dataType: "json", + success: function(response){ + // Do nothing + }, + error: function(response){ + VCenterCommon.importFailure({ + context : row_context, + message : Locale.tr("Could not delete the template " + template_id + " due to " + OpenNebulaError(response).error.message + ". Please remote it manually before importing this template again.") + }); + } + }); } }); }, diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb index fbafd0d890..b7b40a3571 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb @@ -295,7 +295,6 @@ class Template one_i.info disk_info << "DISK=[\n" disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n" - disk_info << "IMAGE_UNAME=\"#{one_i["UNAME"]}\",\n" disk_info << "OPENNEBULA_MANAGED=\"NO\"\n" disk_info << "]\n" end @@ -344,6 +343,9 @@ class Template # Track allocated networks for rollback allocated_networks = [] + # Track port groups duplicated in this VM + duplicated_networks = [] + vc_nics.each do |nic| # Check if the network already exists network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref], @@ -423,11 +425,20 @@ class Template one_vnet[:one] << ar_str if sunstone - sunstone_nic = {} - sunstone_nic[:type] = "NEW_NIC" - sunstone_nic[:network_tmpl] = one_vnet[:one] - sunstone_nic[:one_cluster_id] = cluster_id.to_i - sunstone_nic_info << sunstone_nic + if !duplicated_networks.include?(nic[:net_name]) + sunstone_nic = {} + sunstone_nic[:type] = "NEW_NIC" + sunstone_nic[:network_name] = nic[:net_name] + sunstone_nic[:network_tmpl] = one_vnet[:one] + sunstone_nic[:one_cluster_id] = cluster_id.to_i + sunstone_nic_info << sunstone_nic + duplicated_networks << nic[:net_name] + else + sunstone_nic = {} + sunstone_nic[:type] = "DUPLICATED_NIC" + sunstone_nic[:network_name] = nic[:net_name] + sunstone_nic_info << sunstone_nic + end else # Allocate the Virtual Network allocated_networks << one_vn @@ -444,6 +455,9 @@ class Template nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n" nic_info << "OPENNEBULA_MANAGED=\"NO\"\n" nic_info << "]\n" + + # Refresh npool + npool.info_all end end end From ae853eaebc54b5cef801802a524a317854a2ce17 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Fri, 30 Jun 2017 18:57:02 +0200 Subject: [PATCH 15/21] Add a system.sqlquery API call to execute SELECT queries on the DB. 
It returns the row results --- include/RequestManagerSystem.h | 41 +++++++++++++++ src/oca/ruby/opennebula/system.rb | 19 ++++++- src/rm/RequestManager.cc | 2 + src/rm/RequestManagerSystem.cc | 87 +++++++++++++++++++++++++++++++ 4 files changed, 148 insertions(+), 1 deletion(-) diff --git a/include/RequestManagerSystem.h b/include/RequestManagerSystem.h index 697221acac..08f55a4c38 100644 --- a/include/RequestManagerSystem.h +++ b/include/RequestManagerSystem.h @@ -97,6 +97,47 @@ public: /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ +class SystemSqlQuery: public RequestManagerSystem +{ +public: + SystemSqlQuery():RequestManagerSystem("one.system.sqlquery", + "Executes SQL queries on the DB backend","A:ss") + { + auth_op = AuthRequest::ADMIN; + }; + + ~SystemSqlQuery(){}; + + void request_execute(xmlrpc_c::paramList const& _paramList, + RequestAttributes& att); +private: + + class select_cb : public Callbackable + { + public: + void set_callback() + { + oss.str(""); + + Callbackable::set_callback( + static_cast(&select_cb::callback)); + } + + std::string get_result() + { + return oss.str(); + } + + virtual int callback(void *nil, int num, char **values, char **names); + + private: + std::ostringstream oss; + }; +}; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + class UserQuotaInfo : public RequestManagerSystem { public: diff --git a/src/oca/ruby/opennebula/system.rb b/src/oca/ruby/opennebula/system.rb index 7a5c9ac104..45e77d05d0 100644 --- a/src/oca/ruby/opennebula/system.rb +++ b/src/oca/ruby/opennebula/system.rb @@ -30,7 +30,8 @@ module OpenNebula :groupquotaupdate => "groupquota.update", :version => "system.version", :config => "system.config", - :sql => "system.sql" + :sql => "system.sql", + :sqlquery => "system.sqlquery" } ####################################################################### @@ -57,6 +58,22 @@ module OpenNebula return @client.call(SYSTEM_METHODS[:sql], sql, federate) end + # Executes a SQL query command on OpenNebula DB + # @param [String] Sql string + # @return [String, OpenNebula::Error] Sql execution result in XML + # format in case of success, Error otherwise + # + # the query sent to oned + # + # + # + # column_value + # ... 
+ # + # + def sql_query_command(sql) + return @client.call(SYSTEM_METHODS[:sqlquery], sql) + end # # Gets the oned version # diff --git a/src/rm/RequestManager.cc b/src/rm/RequestManager.cc index 202e84352e..da6cdab644 100644 --- a/src/rm/RequestManager.cc +++ b/src/rm/RequestManager.cc @@ -447,6 +447,7 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr system_version(new SystemVersion()); xmlrpc_c::methodPtr system_config(new SystemConfig()); xmlrpc_c::methodPtr system_sql(new SystemSql()); + xmlrpc_c::methodPtr system_sqlquery(new SystemSqlQuery()); // Rename Methods xmlrpc_c::methodPtr vm_rename(new VirtualMachineRename()); @@ -1065,6 +1066,7 @@ void RequestManager::register_xml_methods() RequestManagerRegistry.addMethod("one.system.version", system_version); RequestManagerRegistry.addMethod("one.system.config", system_config); RequestManagerRegistry.addMethod("one.system.sql", system_sql); + RequestManagerRegistry.addMethod("one.system.sqlquery", system_sqlquery); }; /* -------------------------------------------------------------------------- */ diff --git a/src/rm/RequestManagerSystem.cc b/src/rm/RequestManagerSystem.cc index 87bb51e5f0..7a958c78ae 100644 --- a/src/rm/RequestManagerSystem.cc +++ b/src/rm/RequestManagerSystem.cc @@ -114,6 +114,93 @@ void SystemSql::request_execute(xmlrpc_c::paramList const& paramList, /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ +int SystemSqlQuery::select_cb::callback(void *nil, int num, char **values, + char **names) +{ + oss << ""; + + for ( int i = 0 ; i < num ; ++i ) + { + if (values[i] != 0 && values[i][0] == '<') + { + std::string val(values[i]); + std::string * val64 = one_util::base64_encode(val); + + if ( val64 != 0 ) + { + oss << "<" << names[i] << "64>" + << "" + << ""; + + delete val64; + } + } + else + { + oss << "<" << names[i] << ">" + << "" + << ""; + } + } + + oss << ""; + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +void SystemSqlQuery::request_execute(xmlrpc_c::paramList const& paramList, + RequestAttributes& att) +{ + std::string sql = xmlrpc_c::value_string(paramList.getString(1)); + + Nebula& nd = Nebula::instance(); + LogDB * logdb = nd.get_logdb(); + + SystemSqlQuery::select_cb cb; + + std::ostringstream oss(sql); + + std::string result; + + if ( att.uid != 0 ) + { + att.resp_id = -1; + + failure_response(AUTHORIZATION, att); + return; + } + + + cb.set_callback(); + + int rc = logdb->exec_rd(oss, &cb); + + result = cb.get_result(); + + cb.unset_callback(); + + if ( rc == 0 ) + { + oss.str(""); + + oss << "" + << "" << result << ""; + + success_response(oss.str(), att); + } + else + { + att.resp_id = rc; + failure_response(ACTION, att); + } + + return; +} +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + void UserQuotaInfo::request_execute(xmlrpc_c::paramList const& paramList, RequestAttributes& att) { From ef25a75478be73c01a80bc387a50b557f2d287d3 Mon Sep 17 00:00:00 2001 From: Anton Todorov Date: Fri, 30 Jun 2017 23:18:40 +0300 Subject: [PATCH 16/21] xpath.rb fix crash when querying empty-element tag by returning empty string. 
Handling both `` and `` test pattern: ``` echo -e "\nb\n\ne\n"| /var/lib/one/remotes/datastore/xpath.rb /A/B /A/C /A/D /A/E | while IFS= read -r -d '' e; do echo "'$e'"; done ``` --- src/datastore_mad/remotes/xpath.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/datastore_mad/remotes/xpath.rb b/src/datastore_mad/remotes/xpath.rb index 2997825a89..4fa0ad96de 100755 --- a/src/datastore_mad/remotes/xpath.rb +++ b/src/datastore_mad/remotes/xpath.rb @@ -66,9 +66,9 @@ ARGV.each do |xpath| element = xml.elements[xpath.dup] if !element.nil? if element.class.method_defined?(:text) - values << element.text + values << ( element.text || '' ) else - values << element.to_s + values << ( element.to_s || '' ) end end end From 89c5ae46da9fe3a24a5ab02b98dcbdd28f02f6bf Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Tue, 4 Jul 2017 12:03:19 +0200 Subject: [PATCH 17/21] B #5204: Wrong restricted attribute for VMs after a resize+chown operation --- share/etc/oned.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/etc/oned.conf b/share/etc/oned.conf index bf71b55699..5f8fa873c9 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -920,8 +920,8 @@ VM_RESTRICTED_ATTR = "NIC_DEFAULT/BRIDGE" #VM_RESTRICTED_ATTR = "DISK/VCENTER_DS_REF" #VM_RESTRICTED_ATTR = "DISK/VCENTER_INSTANCE_ID" #VM_RESTRICTED_ATTR = "DISK/SIZE" -VM_RESTRICTED_ATTR = "DISK/ORIGINAL_SIZE" -VM_RESTRICTED_ATTR = "DISK/SIZE_PREV" +#VM_RESTRICTED_ATTR = "DISK/ORIGINAL_SIZE" +#VM_RESTRICTED_ATTR = "DISK/SIZE_PREV" VM_RESTRICTED_ATTR = "CPU_COST" VM_RESTRICTED_ATTR = "MEMORY_COST" VM_RESTRICTED_ATTR = "DISK_COST" From 031da2d3781ed83839e1da7208e10c645d3e6c5e Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Tue, 4 Jul 2017 12:42:29 +0200 Subject: [PATCH 18/21] F #4809: Update migrator. 
There is no longer need to add servers to a zone to configure a federation if not using HA --- src/onedb/database_schema.rb | 1 - src/onedb/local/4.90.0_to_5.3.80.rb | 36 ----------------------------- 2 files changed, 37 deletions(-) diff --git a/src/onedb/database_schema.rb b/src/onedb/database_schema.rb index 0e76e31b89..7ca585d17a 100644 --- a/src/onedb/database_schema.rb +++ b/src/onedb/database_schema.rb @@ -58,7 +58,6 @@ class OneDBBacKEnd "owner_u INTEGER, group_u INTEGER, other_u INTEGER", logdb: "log_index INTEGER PRIMARY KEY, term INTEGER, " << "sqlcmd MEDIUMTEXT, timestamp INTEGER", - fed_logdb: "log_index INTEGER PRIMARY KEY, sqlcmd MEDIUMTEXT", history: "vid INTEGER, seq INTEGER, body MEDIUMTEXT, " << "stime INTEGER, etime INTEGER, PRIMARY KEY(vid,seq)", zone_pool: "oid INTEGER PRIMARY KEY, name VARCHAR(128), " << diff --git a/src/onedb/local/4.90.0_to_5.3.80.rb b/src/onedb/local/4.90.0_to_5.3.80.rb index 2cdab4d331..a84cb2d4c7 100644 --- a/src/onedb/local/4.90.0_to_5.3.80.rb +++ b/src/onedb/local/4.90.0_to_5.3.80.rb @@ -261,41 +261,5 @@ module Migrator ############################################################################ def feature_4809 create_table(:logdb) - create_table(:fed_logdb) - - @db.run "ALTER TABLE zone_pool RENAME TO old_zone_pool;" - create_table(:zone_pool) - - @db.transaction do - @db.fetch("SELECT * FROM old_zone_pool") do |row| - doc = Nokogiri::XML(row[:body], nil, NOKOGIRI_ENCODING) { |c| - c.default_xml.noblanks - } - - zedp = xpath(doc, "TEMPLATE/ENDPOINT") - - server_pool = doc.create_element "SERVER_POOL" - server = doc.create_element "SERVER" - - id = doc.create_element "ID", 0 - name = doc.create_element "NAME", "zone_server" - edp = doc.create_element "ENDPOINT", zedp - - server.add_child(id) - server.add_child(name) - server.add_child(edp) - - server_pool.add_child(server) - - doc.root.add_child(server_pool) - - row[:body] = doc.root.to_s - - @db[:zone_pool].insert(row) - end - end - - @db.run "DROP TABLE old_zone_pool;" - end end From e92bd4375338550eef5c822bc4caac94e10101d7 Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Tue, 4 Jul 2017 16:19:51 +0200 Subject: [PATCH 19/21] F #4894: Terminate action when a VM is in a failure state invokes delete action --- src/dm/DispatchManagerActions.cc | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/dm/DispatchManagerActions.cc b/src/dm/DispatchManagerActions.cc index 09412b0f6b..d4ce82ee0b 100644 --- a/src/dm/DispatchManagerActions.cc +++ b/src/dm/DispatchManagerActions.cc @@ -322,6 +322,23 @@ int DispatchManager::terminate(int vid, bool hard, const RequestAttributes& ra, } break; + case VirtualMachine::BOOT_FAILURE: + case VirtualMachine::BOOT_MIGRATE_FAILURE: + case VirtualMachine::PROLOG_MIGRATE_FAILURE: + case VirtualMachine::PROLOG_FAILURE: + case VirtualMachine::EPILOG_FAILURE: + case VirtualMachine::EPILOG_STOP_FAILURE: + case VirtualMachine::EPILOG_UNDEPLOY_FAILURE: + case VirtualMachine::PROLOG_MIGRATE_POWEROFF_FAILURE: + case VirtualMachine::PROLOG_MIGRATE_SUSPEND_FAILURE: + case VirtualMachine::BOOT_UNDEPLOY_FAILURE: + case VirtualMachine::BOOT_STOPPED_FAILURE: + case VirtualMachine::PROLOG_RESUME_FAILURE: + case VirtualMachine::PROLOG_UNDEPLOY_FAILURE: + case VirtualMachine::PROLOG_MIGRATE_UNKNOWN_FAILURE: + lcm->trigger(LCMAction::DELETE, vid, ra); + break; + default: oss.str(""); oss << "Could not terminate VM " << vid From 0fa579fd46538f666eb422d345aab573d1a2371e Mon Sep 17 00:00:00 2001 From: "Ruben S. 
Montero" Date: Tue, 4 Jul 2017 18:01:13 +0200 Subject: [PATCH 20/21] Disks not managed (TM) by opennebula cannot change persistent state --- include/Image.h | 12 ++++++++++++ include/VirtualMachineDisk.h | 12 ++++++++++++ src/rm/RequestManagerClone.cc | 17 ++++++++++------- src/rm/RequestManagerImage.cc | 9 +++++++++ 4 files changed, 43 insertions(+), 7 deletions(-) diff --git a/include/Image.h b/include/Image.h index ecbeadf6e4..6d3721cc22 100644 --- a/include/Image.h +++ b/include/Image.h @@ -191,6 +191,18 @@ public: return (persistent_img == 1); }; + bool is_managed() const + { + bool one_managed; + + if (get_template_attribute("OPENNEBULA_MANAGED", one_managed) == false) + { + one_managed = true; + } + + return one_managed; + } + /** * Check the PERSISTENT attribute in an image Template, if not set the * DEFAULT_IMAGE_PERSISTENT and DEFAULT_IMAGE_PERSISTENT_NEW are check in diff --git a/include/VirtualMachineDisk.h b/include/VirtualMachineDisk.h index 87194cfa3f..9494dbb5f0 100644 --- a/include/VirtualMachineDisk.h +++ b/include/VirtualMachineDisk.h @@ -52,6 +52,18 @@ public: return is_flag("PERSISTENT"); } + bool is_managed() const + { + bool one_managed; + + if (vector_value("OPENNEBULA_MANAGED", one_managed) == -1) + { + one_managed = true; + } + + return one_managed; + } + void set_attach() { set_flag("ATTACH"); diff --git a/src/rm/RequestManagerClone.cc b/src/rm/RequestManagerClone.cc index 90b3db1728..105f2aedaa 100644 --- a/src/rm/RequestManagerClone.cc +++ b/src/rm/RequestManagerClone.cc @@ -198,17 +198,20 @@ Request::ErrorCode VMTemplateClone::clone(int source_id, const string &name, goto error_images; } - ec = img_persistent.request_execute(new_img_id, true, img_att); - - if (ec != SUCCESS) + if ( (*disk)->is_managed() ) { - NebulaLog::log("ReM", Log::ERROR, failure_message(ec, img_att)); + ec = img_persistent.request_execute(new_img_id, true, img_att); - img_delete.request_execute(img_id, img_att); + if (ec != SUCCESS) + { + NebulaLog::log("ReM",Log::ERROR,failure_message(ec,img_att)); - att.resp_msg = "Failed to clone images: " + img_att.resp_msg; + img_delete.request_execute(img_id, img_att); - goto error_images; + att.resp_msg = "Failed to clone images: " + img_att.resp_msg; + + goto error_images; + } } (*disk)->remove("IMAGE"); diff --git a/src/rm/RequestManagerImage.cc b/src/rm/RequestManagerImage.cc index 99ff2776ae..c0d613d57c 100644 --- a/src/rm/RequestManagerImage.cc +++ b/src/rm/RequestManagerImage.cc @@ -118,6 +118,15 @@ Request::ErrorCode ImagePersistent::request_execute( ds_id = image->get_ds_id(); + if ( !image->is_managed() ) + { + att.resp_msg = "Cannot change persistent state for non-managed images"; + + image->unlock(); + + return ACTION; + } + image->unlock(); ds = dspool->get(ds_id, true); From dde9cf317d70ee51f95796c536604f146d62bf8a Mon Sep 17 00:00:00 2001 From: "Ruben S. Montero" Date: Tue, 4 Jul 2017 18:42:51 +0200 Subject: [PATCH 21/21] F #5208: push down ceph_key to downloader to import rbd in marketplace --- src/datastore_mad/remotes/ceph/export | 18 ++++++------------ src/datastore_mad/remotes/downloader.sh | 6 ++++++ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/datastore_mad/remotes/ceph/export b/src/datastore_mad/remotes/ceph/export index 51b969fef5..7d6dfc9426 100755 --- a/src/datastore_mad/remotes/ceph/export +++ b/src/datastore_mad/remotes/ceph/export @@ -77,29 +77,23 @@ if [ -z "$DST_HOST" ]; then fi IMPORT_SOURCE="rbd://$DST_HOST/$SRC" +IS_JOIN="?" 
if [ -n "$CEPH_USER" ]; then RBD="$RBD --id ${CEPH_USER}" - IMPORT_SOURCE="${IMPORT_SOURCE}?CEPH_USER=${CEPH_USER}" + IMPORT_SOURCE="${IMPORT_SOURCE}${IS_JOIN}CEPH_USER=${CEPH_USER}" + IS_JOIN="&" fi if [ -n "$CEPH_KEY" ]; then RBD="$RBD --keyfile ${CEPH_USER}" - IMPORT_SOURCE="${IMPORT_SOURCE}?CEPH_USER=${CEPH_USER}?CEPH_KEY=${CEPH_KEY}?" + IMPORT_SOURCE="${IMPORT_SOURCE}${IS_JOIN}CEPH_KEY=${CEPH_KEY}" + IS_JOIN="&" fi if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" - - if [ -n "$CEPH_USER" ]; then - IMPORT_SOURCE="${IMPORT_SOURCE}&" - elif [ -n "$CEPH_KEY" ]; then - IMPORT_SOURCE="${IMPORT_SOURCE}&" - else - IMPORT_SOURCE="${IMPORT_SOURCE}?" - fi - - IMPORT_SOURCE="${IMPORT_SOURCE}CEPH_CONF=${CEPH_CONF}" + IMPORT_SOURCE="${IMPORT_SOURCE}${IS_JOIN}CEPH_CONF=${CEPH_CONF}" fi #------------------------------------------------------------------------------- diff --git a/src/datastore_mad/remotes/downloader.sh b/src/datastore_mad/remotes/downloader.sh index 96cc7202df..c2b33c164f 100755 --- a/src/datastore_mad/remotes/downloader.sh +++ b/src/datastore_mad/remotes/downloader.sh @@ -182,6 +182,7 @@ function get_rbd_cmd SOURCE \ PARAM_DS \ PARAM_CEPH_USER \ + PARAM_CEPH_KEY \ PARAM_CEPH_CONF) USER="${URL_ELEMENTS[j++]}" @@ -189,6 +190,7 @@ function get_rbd_cmd SOURCE="${URL_ELEMENTS[j++]}" DS="${URL_ELEMENTS[j++]}" CEPH_USER="${URL_ELEMENTS[j++]}" + CEPH_KEY="${URL_ELEMENTS[j++]}" CEPH_CONF="${URL_ELEMENTS[j++]}" # Remove leading '/' @@ -202,6 +204,10 @@ function get_rbd_cmd RBD="$RBD --id ${CEPH_USER}" fi + if [ -n "$CEPH_KEY" ]; then + RBD="$RBD --keyfile ${CEPH_KEY}" + fi + if [ -n "$CEPH_CONF" ]; then RBD="$RBD --conf ${CEPH_CONF}" fi