1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-03-21 14:50:08 +03:00

Merge branch 'master' into f-3951

This commit is contained in:
Tino Vazquez 2021-01-19 18:09:51 +01:00
commit 14902b9766
No known key found for this signature in database
GPG Key ID: 14201E424D02047E
18 changed files with 196 additions and 60 deletions

View File

@ -21,6 +21,11 @@ inputs:
description: 'Number of AWS instances to create'
default: '1'
- name: 'number_public_ips'
type: text
description: 'Number of public IPs to get'
default: '1'
- name: 'aws_ami_image'
type: text
description: "AWS ami image used for host deployments"

View File

@ -19,10 +19,11 @@ networks:
- name: "${provision}-public"
vn_mad: 'elastic'
bridge: 'br0'
provision:
count: "${input.number_public_ips}"
ar:
- provison_id: "${provision_id}"
size: '1'
packet_ip_type: 'public_ipv4'
ipam_mad: 'aws'
cidr: "${cluster.0.cidr}"

View File

@ -73,6 +73,6 @@ cluster:
reserved_cpu: '0'
reserved_mem: '0'
datastores:
- 0
- 1
provision:
cidr: '10.0.0.0/16'

View File

@ -21,6 +21,11 @@ inputs:
description: "Number of metal servers to create"
default: '1'
- name: 'number_public_ips'
type: text
description: 'Number of public IPs to get'
default: '1'
- name: 'packet_plan'
type: text
description: "Packet plan (device type)"

View File

@ -19,6 +19,8 @@ networks:
- name: "${provision}-public"
vn_mad: 'elastic'
bridge: 'br0'
provision:
count: "${input.number_public_ips}"
ar:
- provison_id: "${provision_id}"
size: '1'

View File

@ -70,6 +70,6 @@ cluster:
name: "${provision}"
description: 'Packet cluster'
datastores:
- 0
- 1
reserved_cpu: '0'
reserved_mem: '0'

View File

@ -21,6 +21,7 @@ require 'uri'
require 'json'
require 'base64'
require 'rexml/document'
require 'securerandom'
# OpenNebula MarketPlace
class OneMarket
@ -111,14 +112,12 @@ class OneMarket
def render_vmtemplate(tmpl, app)
print_var(tmpl, 'SIZE', 0)
print_var(tmpl, 'MD5', app['md5'])
if app['disks']
app['disks'].each do |disk|
dname = disk.keys.first
app['disks'].each do |app_name|
tmpl << <<-EOT.strip
DISK = [ NAME = "#{dname}", APP="#{disk[dname]}" ]
DISK = [ NAME = "#{SecureRandom.hex[0..9]}",
APP="#{app_name}" ]
EOT
end
end
@ -137,14 +136,11 @@ class OneMarket
def render_service_template(tmpl, app)
print_var(tmpl, 'SIZE', 0)
print_var(tmpl, 'MD5', app['md5'])
if app['roles']
app['roles'].each do |role|
rname = role.keys.first
app['roles'].each do |role_name, app_name|
tmpl << <<-EOT.strip
ROLE = [ NAME = "#{rname}", APP="#{role[rname]}" ]
ROLE = [ NAME = "#{role_name}", APP="#{app_name}" ]
EOT
end
end

View File

@ -73,6 +73,8 @@ module OpenNebula::MarketPlaceAppExt
when 'IMAGE'
export_image(options)
when 'VMTEMPLATE'
options[:notemplate] = true
export_vm_template(options)
when 'SERVICE_TEMPLATE'
export_service_template(options)
@ -328,10 +330,12 @@ module OpenNebula::MarketPlaceAppExt
tmpl['roles'].each do |role|
t_id = roles.find {|_, v| v[:names].include?(role['name']) }
next if t_id.nil? || t_id[1].nil? || t_id[1][:vmtemplate]
if t_id.nil? || t_id[1].nil? || t_id[1][:vmtemplate].nil?
next
end
role['vm_template'] = nil
role['vm_template'] = t_id[1][:vmtemplate]
role['vm_template'] = t_id[1][:vmtemplate][0]
end
# --------------------------------------------------------------
@ -387,8 +391,9 @@ module OpenNebula::MarketPlaceAppExt
obj.extend(MarketPlaceAppExt)
rc = obj.export(
:dsid => options[:dsid],
:name => "#{options[:name]}-#{idx}"
:dsid => options[:dsid],
:name => "#{options[:name]}-#{idx}",
:notemplate => options[:notemplate]
)
image = rc[:image].first if rc[:image]

View File

@ -574,8 +574,10 @@ module OneProvision
cfg[r].each do |x|
Driver.retry_loop('Failed to create some resources',
self) do
x['provision'] = { 'id' => @id }
obj = Resource.object(r, nil, x)
x['provision'] ||= {}
x['provision'].merge!({ 'id' => @id })
obj = Resource.object(r, nil, x)
next if obj.nil?

View File

@ -32,6 +32,21 @@ module OneProvision
@type = 'network'
end
# Creates the object in OpenNebula
#
# Before delegating to the parent implementation, replicates the first
# address range (AR) so the network ends up with `provision/count` ARs
# in total (the template ships a single AR that is duplicated).
#
# @param cluster_id [Integer] Cluster ID
#
# @return [Integer] Resource ID
def create(cluster_id)
    # dig avoids a NoMethodError when the 'provision' section is missing
    count = @p_template.dig('provision', 'count')

    if count && @p_template['ar']
        # count - 1 extra copies: the original AR already counts as one
        (Integer(count) - 1).times do
            @p_template['ar'] << @p_template['ar'][0]
        end
    end

    super
end
# Info a specific object
#
# @param id [String] Object ID

View File

@ -400,13 +400,13 @@ void RaftManager::leader()
heartbeat_manager.replicate();
clock_gettime(CLOCK_REALTIME, &last_heartbeat);
auto im = nd.get_im();
im->raft_status(state);
}
aclm->reload_rules();
auto im = nd.get_im();
im->raft_status(state);
if ( nd.is_federation_master() )
{
frm->start_replica_threads();

View File

@ -110,11 +110,13 @@ REPLICA_STORAGE_IP=$(awk 'gsub(/[\0]/, x)' \
# ------------------------------------------------------------------------------
# Synchronize Image Datastore in the Replica Host. Use a recovery snapshot
# if present in the RECOVERY_SNAPS_DIR
# (recovery snap existence means recreate is running, VMID is reused)
# ------------------------------------------------------------------------------
if recovery_snap_exists $REPLICA_HOST $VMID/$DST_FILE; then
# point to [disk].recovery_snapshot files
SRC_DIR=${REPLICA_RECOVERY_SNAPS_DIR}/$VMID
SRC_FILE="${DST_FILE}.recovery_snapshot"
SRC_FILE=$DST_FILE
SRC_PATH="$SRC_DIR/$SRC_FILE"
RECOVERY="YES"
else
@ -132,15 +134,24 @@ log "Cloning $SRC_PATH via replica $REPLICA_HOST in $DST"
# copy locally, we hit the replica
if [ "$REPLICA_HOST" = "$DST_HOST" ]; then
# if recovery snapshot is needed, prepare base <- base.1 qcow2 structure
CLONE_CMD=$(cat <<EOF
cd $SRC_DIR
cp $SRC_FILE $DST_PATH
# this only prints create_base function content
$(type create_base| grep -v 'is a function')
if [ -d $SRC_PATH.snap ]; then
cp -r $SRC_PATH.snap $DST_PATH.snap
if [ "$RECOVERY" != "YES" ] && [ -n "$RECOVERY_SNAPSHOT_FREQ" ]; then
create_base "$SRC_PATH" "$DST_PATH"
else
cd $SRC_DIR
cp $SRC_FILE $DST_PATH
if [ -d $SRC_PATH.snap ]; then
cp -r $SRC_PATH.snap $DST_PATH.snap
fi
fi
EOF
)
else
# copy to remote using faster tar|ssh
CLONE_CMD=$(cat <<EOF
@ -153,35 +164,45 @@ else
ssh $REPLICA_SSH_OPTS ${REPLICA_STORAGE_IP:-$DST_HOST} "$TAR xSf - -C $DST_DIR"
EOF
)
# if recovery snapshot is needed, prepare base <- base.1 qcow2 structure
if [ "$RECOVERY" != "YES" ] && [ -n "$RECOVERY_SNAPSHOT_FREQ" ]; then
MAKE_SNAP_CMD=$(cat <<EOF
# this only prints create_base function content
$(type create_base| grep -v 'is a function')
create_base "$DST_PATH" "$DST_PATH" "mv"
EOF
)
fi
fi
# Prepare uncompressed base image copy on REPLICA for recovery snapshots
# to speedup first rsync
# Prepare base image copy on REPLICA for recovery snapshots
if [ -n "$RECOVERY_SNAPSHOT_FREQ" ] && [ "$RECOVERY" != "YES" ]; then
SNAP_DIR="$REPLICA_RECOVERY_SNAPS_DIR/$VMID"
BASE_CMD=$(cat <<EOF
# TODO: Consider multiple subsequent recoveries
REPLICA_BASE_CMD=$(cat <<EOF
set -e -o pipefail
mkdir -p $SNAP_DIR
cd $SNAP_DIR
if file $SRC_PATH | grep -q QCOW; then
qemu-img convert -O qcow2 $SRC_PATH ${DST_FILE}.recovery_snapshot
else
cp $SRC_PATH ${DST_FILE}.recovery_snapshot
fi
mkdir -p $SNAP_DIR/$DST_FILE.snap
ln -f -s $DST_FILE.snap/base.1 $SNAP_DIR/$DST_FILE
cp $SRC_PATH $SNAP_DIR/$DST_FILE.snap/base
EOF
)
ssh_forward ssh_exec_and_log "$REPLICA_HOST" "$BASE_CMD" \
ssh_forward ssh_exec_and_log "$REPLICA_HOST" "$REPLICA_BASE_CMD" \
"Error uploading base image to replica"
fi
ssh_forward ssh_exec_and_log $REPLICA_HOST "$CLONE_CMD" \
"Error copying $SRC to $DST"
if [ -n "$MAKE_SNAP_CMD" ]; then
ssh_exec_and_log "$DST_HOST" "$MAKE_SNAP_CMD" \
"Error resizing image $DST"
fi
if [ -n "$RESIZE_CMD" ]; then
ssh_exec_and_log "$DST_HOST" "$RESIZE_CMD" \
"Error resizing image $DST"

View File

@ -70,7 +70,7 @@ for vm in $vms; do
if [ -n "$snap_ts" ] && [ "$rc" = "0" ]; then
vm_monitor="${vm_monitor}\nDISK_RECOVERY_SNAPSHOT = [ ID=${disk_id}, TIMESTAMP=${snap_ts}]"
else
vm_monitor="${vm_monitor}\nDISK_RECOVERY_SNAPSHOT = [ ID=${disk_id}, MSG=\"ERROR $rc\"]"
vm_monitor="${vm_monitor}\nDISK_RECOVERY_SNAPSHOT = [ ID=${disk_id}, MSG=\"ERROR $rc ${snap_ts}\"]"
fi
fi
fi

View File

@ -24,14 +24,13 @@ DISK_PATH=$2
FREQ=$3
REPLICA_HOST=$4
DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../../etc/vmm/kvm/kvmrc
source ${DRIVER_PATH}/../../etc/tm/ssh/sshrc
source ${DRIVER_PATH}/../../scripts_common.sh
mkdir -p "${DISK_PATH}.snap"
SNAP_PATH="${DISK_PATH}.snap/recovery_snapshot"
SNAP_PATH="${DISK_PATH}.snap/rs_tmp"
DISK_NAME="$(basename $DISK_PATH)"
if [ -f $SNAP_PATH ]; then
@ -45,15 +44,45 @@ if [ -f $SNAP_PATH ]; then
fi
fi
if [ ! -L $DISK_PATH ]; then
echo "$DISK_PATH not a symlink"
exit 1
fi
# Enumerate disks for which we don't create snapshot (all except $DISK_PATH)
DISKS=$(virsh -c ${LIBVIRT_URI} domblklist one-${VMID} | grep disk | awk '{print $2}')
OTHER_DISK_STR=""
for DISK in $DISKS; do
[ "$DISK" = "$DISK_PATH" ] && continue
OTHER_DISK_STR+="--diskspec $DISK,snapshot=no "
done
# saves disk changes to base.1, moves the active snap to rs_tmp
# paths need to be absolute, otherwise snapshot-create and blockcommit fail
touch $SNAP_PATH
SNAP_CMD=$(cat <<EOF
virsh -c ${LIBVIRT_URI} snapshot-create-as one-${VMID} recovery_snap \
--diskspec $DISK_PATH,file=$SNAP_PATH \
$OTHER_DISK_STR \
--disk-only --atomic --no-metadata
EOF
)
retry_if "active block job" 3 5\
virsh -q -c ${LIBVIRT_URI} blockcopy one-${VMID} \
--path ${DISK_PATH} --dest $SNAP_PATH --wait --finish
# try with quiesce first (needs guest agent)
$SNAP_CMD --quiesce || $SNAP_CMD
ssh $REPLICA_HOST "mkdir -p $REPLICA_RECOVERY_SNAPS_DIR/$VMID"
# copy base.1 to the replica
ssh $REPLICA_HOST "mkdir -p $REPLICA_RECOVERY_SNAPS_DIR/$VMID/$DISK_NAME.snap"
rsync -q $DISK_PATH.snap/base.1 \
$REPLICA_HOST:$REPLICA_RECOVERY_SNAPS_DIR/$VMID/$DISK_NAME.snap/ > /dev/null
rsync -q $SNAP_PATH \
$REPLICA_HOST:$REPLICA_RECOVERY_SNAPS_DIR/$VMID/${DISK_NAME}.recovery_snapshot > /dev/null
# reduce the backing-chain using blockcommit
# base <- base.1 <- rs_tmp is reduced to base <- base.1
# outdated rs_tmp is deleted next cycle
virsh -c ${LIBVIRT_URI} blockcommit one-${VMID} $SNAP_PATH \
--base $DISK_PATH.snap/base.1 \
--top $SNAP_PATH \
--active --pivot --wait
stat -c "%Y" $SNAP_PATH

View File

@ -25,15 +25,18 @@ DSID=$4
if [ -z "${ONE_LOCATION}" ]; then
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
SSH_UTILS=/var/lib/one/remotes/tm/ssh/ssh_utils.sh
DATASTORES=/var/lib/one/datastores
else
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
SSH_UTILS=$ONE_LOCATION/var/remotes/tm/ssh/ssh_utils.sh
DATASTORES=$ONE_LOCATION/var/datastores
fi
DRIVER_PATH=$(dirname $0)
. $TMCOMMON
. $SSH_UTILS
SRC_PATH=$(arg_path $SRC)
SRC_HOST=$(arg_host $SRC)
@ -52,11 +55,12 @@ while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <(onevm show -x $VMID| $XPATH \
/VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/SOURCE \
/VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE)
/VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/CLONE \
/VM/TEMPLATE/DISK[DISK_ID=$DISK_ID]/RECOVERY_SNAPSHOT_FREQ)
DISK_SRC="${XPATH_ELEMENTS[j++]}"
CLONE="${XPATH_ELEMENTS[j++]}"
RECOVERY_SNAPSHOT_FREQ="${XPATH_ELEMENTS[j++]}"
SYSTEM_DS_PATH=$(dirname ${SRC_PATH})
IMAGE_DS_PATH=$(dirname ${DISK_SRC})
@ -68,12 +72,27 @@ SNAP_PATH="${SNAP_DIR}/${SNAP_ID}"
SNAP_PATH_RELATIVE=$(basename ${SNAP_PATH})
CURRENT_PATH=${DISK_PATH}
CMD=$(cat <<EOF
set -e -o pipefail
rm "${CURRENT_PATH}"
cp "${SNAP_PATH}" "${CURRENT_PATH}"
if [ -n "$RECOVERY_SNAPSHOT_FREQ" ]; then
CMD=$(cat <<EOF
# this only prints create_base function content
$(type create_base| grep -v 'is a function')
set -e -o pipefail
rm "${DISK_PATH}.snap/base"
rm "${DISK_PATH}.snap/base.1"
create_base "$SNAP_PATH" "$DISK_PATH"
EOF
)
else
CMD=$(cat <<EOF
set -e -o pipefail
rm "${CURRENT_PATH}"
cp "${SNAP_PATH}" "${CURRENT_PATH}"
EOF
)
fi
ssh_exec_and_log "${SRC_HOST}" "${CMD}" \
"Error reverting snapshot to ${SNAP_PATH}"

View File

@ -179,7 +179,33 @@ function recovery_snap_exists() {
local REPLICA_HOST=$1
local DISK=$2
SNAP_PATH="${REPLICA_RECOVERY_SNAPS_DIR}/$DISK.recovery_snapshot"
SNAP_PATH="${REPLICA_RECOVERY_SNAPS_DIR}/$DISK.snap"
ssh "$REPLICA_HOST" "test -f \"$SNAP_PATH\""
ssh "$REPLICA_HOST" "test -f \"$SNAP_PATH/base\" && test -f \"$SNAP_PATH/base.1\""
}
# ------------------------------------------------------------------------------
# Creates base + base.1 overlay qcow2 structure as following:
#
# $VM_DIR/disk.0 symlink -> disk.0.snap/base.1
# $VM_DIR/disk.0.snap dir
# $VM_DIR/disk.0.snap/disk.0.snap symlink -> . for relative referencing
# $VM_DIR/disk.0.snap/base base image (cp/mv from SRC_PATH)
# $VM_DIR/disk.0.snap/base.1 qcow2 overlay (backing file = base)
#
# ------------------------------------------------------------------------------
function create_base() {
local SRC_PATH=$1
local DST_PATH=$2
local COPY=${3:-cp}    # transfer command for the base image: "cp" (default) or "mv"
# file name of the destination disk (e.g. disk.0)
DST_FILE=$(basename $DST_PATH)
mkdir -p $DST_PATH.snap
cd $DST_PATH.snap
# self-referencing symlink (disk.X.snap -> .) so base.1's backing path
# resolves relative to the .snap directory itself
ln -f -s . $DST_FILE.snap
$COPY $SRC_PATH base
# qcow2 overlay on top of base; writes land in base.1
# NOTE(review): newer qemu-img versions require -F with -b to declare the
# backing format — confirm against the target qemu version
qemu-img create -b $DST_FILE.snap/base -f qcow2 base.1
# replace the destination path with a symlink to the active overlay
ln -f -s $DST_FILE.snap/base.1 $DST_PATH
cd -
}

View File

@ -42,6 +42,13 @@ get_size_and_format_of_disk_img() {
local PARAM="$2"
if [ -L "$QEMU_IMG_PATH" ]; then
TARGET=$(readlink "$QEMU_IMG_PATH")A
# symlink to disk.X.snap/base.1
if [[ "$TARGET" =~ disk.[0-9]*.snap/base.1 ]]; then
echo unknown qcow2-symlink
return
fi
# symlink, assume network disk
echo unknown network-disk
return
@ -143,6 +150,10 @@ else
create_target_disk_img "$DEST_HOST" "$DISK_PATH" "$SIZE"
MIGRATE_DISKS+="${MIGRATE_DISKS:+,}${DISK_DEV}"
elif [ "$FORMAT" = "qcow2-symlink" ]; then
# don't create disk, .snap dir will be copied anyway
MIGRATE_DISKS+="${MIGRATE_DISKS:+,}${DISK_DEV}"
elif [ "$FORMAT" = "network-disk" ]; then
true # skip
fi

View File

@ -63,8 +63,7 @@ class VCenterConf < Hash
vcenterrc_path = "#{VAR_LOCATION}/remotes/etc/vmm/vcenter/vcenterrc"
merge!(YAML.load_file(vcenterrc_path))
rescue StandardError => e
STDERR.puts error_message("Couldn't load vcenterrc. \
Reason #{e.message}.")
STDERR.puts "Couldn't load vcenterrc. Reason #{e.message}."
end
super