
F #6029: Add rsync incremental backups (#2516)

* Fix rsync datastore monitoring
* Add increment flatten for rsync backups
* Fix/Refactor rsync restore action handler
* Fix/Refactor rsync rm action handler
* Add dedicated downloader code for rsync

(cherry picked from commit f4eee21924)
Michal Opala 2023-02-21 17:19:01 +01:00 committed by Ruben S. Montero
parent 7ae44ec4ca
commit 1458fd7ca4
7 changed files with 491 additions and 190 deletions
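
The new incremental backups are referenced with URLs of the form rsync://<datastore_id>/<increment_id>:<snapshot_id>,...//<path_to_last_disk_increment> (see the example URL inside the downloader diffs below). As orientation before the diffs, here is a minimal standalone sketch of how such a reference breaks down into the per-increment disk paths that the new rsync_downloader.rb and increment_flatten drivers operate on; only the example URL and the path layout are taken from this commit, the variable names are illustrative:

#!/usr/bin/env ruby
# Illustrative sketch only (not part of this commit): decompose an rsync
# backup reference into the per-increment qcow2 paths on the backup server.
require 'pathname'

# Example reference, taken from the downloader code in this commit.
url = 'rsync://100/0:8a3454,1:f6e63e//var/lib/one//datastores/100/6/8a3454/disk.0.0'

tokens     = url.delete_prefix('rsync://').split('/')
ds_id      = tokens[0].to_i                               # backup datastore ID (100)
increments = tokens[1].split(',').map {|s| s.split(':') } # [["0", "8a3454"], ["1", "f6e63e"]]
disk_path  = tokens[2..].join('/')                        # absolute path of the last increment
disk_index = Pathname.new(disk_path).basename.to_s.split('.')[1] # "0" in disk.0.0
vm_dir     = Pathname.new(disk_path).dirname.dirname.to_s # .../datastores/100/6

# Each increment lives at <vm_dir>/<snapshot_id>/disk.<disk_index>.<increment_id>;
# the drivers rebuild the qcow2 backing chain from these files and then
# flatten or convert it with qemu-img.
puts "backup datastore ID: #{ds_id}"
increments.each do |inc_id, snap_id|
  puts "#{vm_dir}/#{snap_id}/disk.#{disk_index}.#{inc_id}"
end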

View File

@@ -2150,6 +2150,7 @@ DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
src/datastore_mad/remotes/lxd_downloader.sh \
src/datastore_mad/remotes/docker_downloader.sh \
src/datastore_mad/remotes/restic_downloader.rb \
src/datastore_mad/remotes/rsync_downloader.rb \
src/datastore_mad/remotes/vcenter_uploader.rb \
src/datastore_mad/remotes/vcenter_downloader.rb \
src/datastore_mad/remotes/url.rb \
@@ -2236,7 +2237,8 @@ DATASTORE_DRIVER_RSYNC_SCRIPTS="src/datastore_mad/remotes/rsync/cp \
src/datastore_mad/remotes/rsync/rm \
src/datastore_mad/remotes/rsync/backup \
src/datastore_mad/remotes/rsync/restore \
src/datastore_mad/remotes/rsync/export"
src/datastore_mad/remotes/rsync/export \
src/datastore_mad/remotes/rsync/increment_flatten"
DATASTORE_DRIVER_ETC_SCRIPTS="src/datastore_mad/remotes/datastore.conf"

View File

@@ -162,47 +162,6 @@ function s3_env
CURRENT_DATE_ISO8601="${CURRENT_DATE_DAY}T$(date -u '+%H%M%S')Z"
}
# Get rsync repo information from DS template
# Sets the following variables:
# - RSYNC_CMD = rsync -a user@host:/base/path
function rsync_env
{
XPATH="$DRIVER_PATH/xpath.rb --stdin"
unset i j XPATH_ELEMENTS
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <(onedatastore show -x --decrypt $1 | $XPATH \
/DATASTORE/TEMPLATE/RSYNC_HOST \
/DATASTORE/TEMPLATE/RSYNC_USER \
/DATASTORE/TEMPLATE/RSYNC_TMP_DIR \
/DATASTORE/BASE_PATH)
RSYNC_HOST="${XPATH_ELEMENTS[j++]}"
RSYNC_USER="${XPATH_ELEMENTS[j++]}"
TMP_DIR="${XPATH_ELEMENTS[j++]}"
RSYNC_BASE_PATH="${XPATH_ELEMENTS[j++]}"
if [ -z "${RSYNC_HOST}" -o -z "${RSYNC_USER}" ]; then
echo "RSYNC_HOST and RSYNC_USER are required" >&2
exit -1
fi
if [ -z "${TMP_DIR}" ]; then
TMP_DIR="/var/tmp/"
fi
RSYNC_TMP_DIR="${TMP_DIR}/`uuidgen`"
# scp is faster than rsync for singular files
RSYNC_PULL_CMD="scp ${RSYNC_USER}@${RSYNC_HOST}:"
export RSYNC_PULL_CMD
export RSYNC_TMP_DIR
export RSYNC_BASE_PATH
}
# Create an SHA-256 hash in hexadecimal.
# Usage:
# hash_sha256 <string>
@@ -480,48 +439,7 @@ restic://*)
eval `$VAR_LOCATION/remotes/datastore/restic_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
;;
rsync://*)
# example: rsync://100/0:8a3454,1:f6e63e//var/lib/one//datastores/100/6/8a3454/disk.0.0
# RSYNC_TMP_DIR="${TMP_DIR}/`uuidgen`"
# RSYNC_PULL_CMD="scp ${RSYNC_USER}@${RSYNC_HOST}:"
rsync_path=${FROM#rsync://}
d_id=`echo ${rsync_path} | cut -d'/' -f1`
b_id=`echo ${rsync_path} | cut -d'/' -f2`
file=`echo ${rsync_path} | cut -d'/' -f3-`
vm_path=${file%/*/*} #/var/lib/one/datastores/100/6
rsync_env $d_id
mkdir -p ${RSYNC_TMP_DIR}
pushd ${RSYNC_TMP_DIR}
incs=(${b_id//,/ })
f_name=`echo $file | rev | cut -d'/' -f1 | rev`
for i in "${incs[@]}"; do
inc_id=`echo $i | cut -d':' -f1`
snap_id=`echo $i | cut -d':' -f2`
${RSYNC_PULL_CMD}/${vm_path}/${snap_id}/${f_name%\.*}.${inc_id} ./disk.${inc_id}
done
for i in `ls disk* | sort -r`; do # for each dumped disk
id=`echo $i | cut -d'.' -f2`
pid=$((id - 1))
if [ -f "disk.${pid}" ]; then # rebase to one disk
qemu-img rebase -u -F qcow2 -b "disk.${pid}" "disk.${id}"
else
qemu-img rebase -u -b '' "disk.${id}"
fi
done
qemu-img convert -O qcow2 -m 4 `ls disk* | sort -r | head -1` disk.qcow2
command="cat `realpath disk.qcow2`"
clean_command="rm -rf ${RSYNC_TMP_DIR}"
popd
eval `$VAR_LOCATION/remotes/datastore/rsync_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
;;
*)
if [ ! -r $FROM ]; then

View File

@@ -0,0 +1,194 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
end
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
require 'base64'
require 'pathname'
require 'rexml/document'
require_relative '../../tm/lib/backup'
require_relative '../../tm/lib/tm_action'
daction64 = ARGV[0]
# Parse input data.
begin
action = Base64.decode64 daction64
image = TransferManager::BackupImage.new action
exit(0) if image.snapshots.size <= image.keep_last
snaps = image.snapshots.first(image.snapshots.size - image.keep_last + 1)
xml = REXML::Document.new(action).root.elements
base_path = xml['DATASTORE/BASE_PATH'].text
rsync_user = xml['DATASTORE/TEMPLATE/RSYNC_USER']&.text || 'oneadmin'
rsync_host = xml['DATASTORE/TEMPLATE/RSYNC_HOST'].text
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
# Gather and resolve paths.
begin
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
snap_dirs = snaps.map do |snap|
raw = %(#{base_path}/#{image.vm_id}/#{snap}/)
cleaned = Pathname.new(raw).cleanpath.to_s
quoted = %('#{cleaned}/')
quoted
end
script << %(find #{snap_dirs.join(' ')} -type f -name 'disk.*')
rc = TransferManager::Action.ssh 'list_disks',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Unable to list qcow2 images: #{rc.stderr}" if rc.code != 0
stdout_lines = rc.stdout.lines.map(&:strip).reject(&:empty?)
disks = { :by_snap => {}, :by_index => {} }
snaps.each do |snap|
disks[:by_snap][snap] = stdout_lines.select do |line|
line.include?("/#{snap}/disk.")
end
end
disks[:by_snap].values.flatten.each do |path|
name = Pathname.new(path).basename.to_s
tokens = name.split('.')
(disks[:by_index][tokens[1]] ||= []) << path
end
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
# Reconstruct qcow2 chains for all viable disks.
begin
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
disks[:by_index].each do |_, disk_paths|
script << image.class.reconstruct_chain(disk_paths)
end
rc = TransferManager::Action.ssh 'reconstruct_chains',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Unable to reconstruct qcow2 chains: #{rc.stderr}" if rc.code != 0
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
# Merge qcow2 chains for all viable disks.
begin
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
disks[:by_index].each do |_, disk_paths|
script << image.class.merge_chain(disk_paths)
end
# Calculate the new snapshot's size (sum of all qcow2 image sizes in the snapshot).
script << 'RC=`('
disks[:by_index].each do |_, disk_paths|
script << "qemu-img info --output json '#{disk_paths.last}';"
end
script << ')`'
script << %(SIZE=`jq --slurp 'map(."actual-size" | tonumber) | add' <<< "$RC"`)
to_delete = snap_dirs.first(snap_dirs.size - 1)
script << %(rm -rf #{to_delete.join(' ')}) unless to_delete.empty?
script << %(echo "$SIZE")
rc = TransferManager::Action.ssh 'flatten_chains',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Unable to flatten increments: #{rc.stderr}" if rc.code != 0
new_size = rc.stdout.lines.last.strip.to_i / 1024**2
STDOUT.puts "#{new_size} #{image.chain_keep_last(snaps.last)}"
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
exit(0)

View File

@@ -50,7 +50,7 @@ RSYNC_HOST="${XPATH_ELEMENTS[i++]}"
RSYNC_USER="${XPATH_ELEMENTS[i++]}"
BASE_PATH="${XPATH_ELEMENTS[i++]}"
DF=$(ssh $RSYNC_USER@$RSYNC_HOST "df -PBM $BASE_PATH" | tail -1)
DF=$(ssh $RSYNC_USER@$RSYNC_HOST "mkdir -p '$BASE_PATH/' && df -PBM '$BASE_PATH/'" | tail -1)
#/dev/sda1 20469M 2983M 17487M 15% /
TOTAL=$(echo $DF | awk '{print $2}')
USED=$(echo $DF | awk '{print $3}')

View File

@@ -22,14 +22,12 @@ if !ONE_LOCATION
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
VAR_LOCATION = '/var/lib/one'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
VAR_LOCATION = ONE_LOCATION + '/var'
end
@@ -58,11 +56,10 @@ end
$LOAD_PATH << RUBY_LIB_LOCATION
require 'base64'
require 'CommandManager'
require 'rexml/document'
require 'opennebula'
require 'opennebula/server_cipher_auth'
require 'pathname'
require 'rexml/document'
require_relative '../../tm/lib/backup'
require_relative '../../tm/lib/tm_action'
@@ -72,72 +69,88 @@ require_relative '../../tm/lib/tm_action'
# - vm.xml description
# - list of disks in the backup
# ------------------------------------------------------------------------------
drv_action = Base64.decode64(ARGV[0])
daction64 = ARGV[0]
_request_id = ARGV[1]
rds = REXML::Document.new(drv_action).root
begin
buid = rds.elements['IMAGE/SOURCE'].text
iid = rds.elements['IMAGE/ID'].text.to_i
dsid = rds.elements['DATASTORE/ID'].text.to_i
base = rds.elements['DATASTORE/BASE_PATH'].text
action = Base64.decode64 daction64
rsync_host = rds.elements['DATASTORE/TEMPLATE/RSYNC_HOST'].text
rsync_user = rds.elements['DATASTORE/TEMPLATE/RSYNC_USER'].text
image = TransferManager::BackupImage.new(action)
vm_id = rds.elements['IMAGE/VMS[1]/ID'].text.to_i
image = TransferManager::BackupImage.new(drv_action)
xml = REXML::Document.new(action).root.elements
ds_id = xml['DATASTORE/ID'].text.to_i
base_path = xml['DATASTORE/BASE_PATH'].text
rsync_user = xml['DATASTORE/TEMPLATE/RSYNC_USER']&.text || 'oneadmin'
rsync_host = xml['DATASTORE/TEMPLATE/RSYNC_HOST'].text
rescue StandardError => e
STDERR.puts "Missing datastore or image attributes: #{e.message}"
exit(1)
exit(-1)
end
begin
username = rds.elements['TEMPLATE/USERNAME'].text
dst_ds_id = rds.elements['DESTINATION_DS_ID'].text.to_i
username = xml['TEMPLATE/USERNAME'].text
dst_ds_id = xml['DESTINATION_DS_ID'].text.to_i
rescue StandardError
STDERR.puts 'Cannot find USERNAME / DESTINATION_DS_ID'
exit(1)
exit(-1)
end
bk_path = "#{base}/#{vm_id}/#{buid}"
begin
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
rc = TransferManager::Action.ssh('list_bkp_files',
:host => "#{rsync_user}@#{rsync_host}",
:cmds => "ls -d #{bk_path}/*",
:nostdout => false)
if rc.code != 0
STDERR.puts rc.stderr
exit(1)
snap_dirs = image.snapshots.map do |snap|
raw = %(#{base_path}/#{image.vm_id}/#{snap}/)
cleaned = Pathname.new(raw).cleanpath.to_s
quoted = %('#{cleaned}/')
quoted
end
script << %(find #{snap_dirs.join(' ')} -type f -name 'disk.*' -or -name 'vm.xml')
rc = TransferManager::Action.ssh 'list_files',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Unable to list backup files: #{rc.stderr}" if rc.code != 0
stdout_lines = rc.stdout.lines.map(&:strip).reject(&:empty?)
disk_paths = stdout_lines.select do |line|
line.include?('/disk.')
end
vm_xml_path = stdout_lines.find do |line|
line.include?("/#{image.last}/vm.xml")
end
raise StandardError, 'Backup does not contain any disks or missing vm.xml' \
if disk_paths.empty? || vm_xml_path.nil?
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
disks = []
vm_xml_path = ''
begin
rc = TransferManager::Action.ssh 'read_vm_xml',
:host => "#{rsync_user}@#{rsync_host}",
:cmds => "cat '#{vm_xml_path}'",
:nostdout => false,
:nostderr => false
rc.stdout.each_line do |l|
l.delete('"').strip!
raise StandardError, "Unable to read vm.xml: #{rc.stderr}" if rc.code != 0
disks << l if l.match(/disk\.[0-9]+\.[0-9]+$/)
vm_xml_path = l if l.match(/vm\.xml$/)
vm_xml = rc.stdout
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
if disks.empty? || vm_xml_path.empty?
STDERR.puts 'Backup does not contain any disk or missing vm.xml'
exit(1)
end
rc = TransferManager::Action.ssh('gather_vm_xml',
:host => "#{rsync_user}@#{rsync_host}",
:cmds => "cat #{vm_xml_path}",
:nostdout => false)
if rc.code != 0
STDERR.puts rc.stderr
exit(1)
end
vm_xml = rc.stdout
# ------------------------------------------------------------------------------
# Prepare an OpenNebula client to impersonate the target user
# ------------------------------------------------------------------------------
@@ -152,48 +165,47 @@ one_client = OpenNebula::Client.new(token)
# Create backup object templates for VM and associated disk images
# Monkey patch REXML::DOCUMENT to respond to []
# ------------------------------------------------------------------------------
rds.define_singleton_method('[]') {|xpath| elements[xpath].text }
xml.define_singleton_method('[]') {|xpath| elements[xpath].text }
restorer = TransferManager::BackupRestore.new(
:vm_xml64 => vm_xml,
:backup_id => buid,
:backup_id => image.last,
:bimage => image,
:ds_id => dsid,
:image_id => iid,
:txml => rds,
:ds_id => ds_id,
:txml => xml,
:proto => 'rsync'
)
br_disks = restorer.disk_images(disks)
br_disks = restorer.disk_images(disk_paths)
one_error = ''
images = []
one_error = ''
restored_images = []
# Create disk images
br_disks.each do |_id, disk|
# Fix image name - maybe not necessary any longer
# disk[:template].gsub!(%r{(NAME = "[0-9]+-)[0-9]+/}, '\1')
image = OpenNebula::Image.new(OpenNebula::Image.build_xml, one_client)
rc = image.allocate(disk[:template], dst_ds_id)
restored_image = OpenNebula::Image.new(OpenNebula::Image.build_xml, one_client)
rc = restored_image.allocate(disk[:template], dst_ds_id)
if OpenNebula.is_error?(rc)
one_error = rc.message
break
end
disk[:image_id] = image.id
images << image.id
disk[:image_id] = restored_image.id
restored_images << restored_image.id
end
if !one_error.empty?
message = "Error restoring disk image: #{one_error}"
if !images.empty?
message << " The following images were restored: #{images.join(' ')}"
if !restored_images.empty?
message << " The following images were restored: #{restored_images.join(' ')}"
end
STDERR.puts message
exit(1)
exit(-1)
end
# Create VM template
@@ -208,14 +220,14 @@ rc = tmpl.allocate(vm_template)
if OpenNebula.is_error?(rc)
message = "Error creating VM template: #{rc.message}"
if !images.empty?
message << " The following images were restored: #{images.join(' ')}"
if !restored_images.empty?
message << " The following images were restored: #{restored_images.join(' ')}"
end
STDERR.puts message
exit(1)
exit(-1)
end
STDOUT.puts "#{tmpl.id} #{images.join(' ')}"
STDOUT.puts "#{tmpl.id} #{restored_images.join(' ')}"
exit(0)

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
@@ -15,52 +15,95 @@
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
###############################################################################
# This script is used to remove a VM image (SRC) from the image repository
###############################################################################
# ------------ Set up the environment to source common tools ------------
if [ -z "${ONE_LOCATION}" ]; then
LIB_LOCATION=/usr/lib/one
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
else
LIB_LOCATION=$ONE_LOCATION/lib
fi
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
end
. $LIB_LOCATION/sh/scripts_common.sh
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
# -------- Get rm and datastore arguments from OpenNebula core ------------
$LOAD_PATH << RUBY_LIB_LOCATION
DRV_ACTION=$1
ID=$2
require 'base64'
require 'pathname'
require 'rexml/document'
XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"
require_relative '../../tm/lib/backup'
require_relative '../../tm/lib/tm_action'
unset i XPATH_ELEMENTS
daction64 = ARGV[0]
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RSYNC_HOST \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RSYNC_USER \
/DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
/DS_DRIVER_ACTION_DATA/IMAGE/VMS[1]/ID )
# Parse input data.
unset i
begin
action = Base64.decode64 daction64
SRC="${XPATH_ELEMENTS[i++]}"
RSYNC_HOST="${XPATH_ELEMENTS[i++]}"
RSYNC_USER="${XPATH_ELEMENTS[i++]}"
BASE_PATH="${XPATH_ELEMENTS[i++]}"
VM_ID="${XPATH_ELEMENTS[i++]}"
image = TransferManager::BackupImage.new action
# ------------ Remove the image from the repository ------------
xml = REXML::Document.new(action).root.elements
BACKUP_PATH="${BASE_PATH}/${VM_ID}/${SRC}"
base_path = xml['DATASTORE/BASE_PATH'].text
rsync_user = xml['DATASTORE/TEMPLATE/RSYNC_USER']&.text || 'oneadmin'
rsync_host = xml['DATASTORE/TEMPLATE/RSYNC_HOST'].text
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
ssh_exec_and_log "$RSYNC_USER@$RSYNC_HOST" "[ -d $BACKUP_PATH ] && rm -rf $BACKUP_PATH" \
"Error deleting $BACKUP_PATH in $RSYNC_HOST"
begin
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
snap_dirs = image.snapshots.map do |snap|
raw = %(#{base_path}/#{image.vm_id}/#{snap}/)
cleaned = Pathname.new(raw).cleanpath.to_s
quoted = %('#{cleaned}/')
quoted
end
raise StandardError, 'Nothing to remove' if snap_dirs.empty?
script << %(rm -rf #{snap_dirs.join(' ')})
rc = TransferManager::Action.ssh 'remove_snapshots',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Unable to remove snapshots: #{rc.stderr}" if rc.code != 0
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
exit(0)

View File

@@ -0,0 +1,132 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
VAR_LOCATION ||= '/var/lib/one'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
VAR_LOCATION ||= ONE_LOCATION + '/var'
end
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << File.dirname(__FILE__)
require 'opennebula'
require 'pathname'
require 'securerandom'
require_relative '../tm/lib/backup'
require_relative '../tm/lib/tm_action'
# Parse input data.
# rsync://100/0:8a3454,1:f6e63e//var/lib/one//datastores/100/6/8a3454/disk.0.0
rsync_url = ARGV[0]
tokens = rsync_url.delete_prefix('rsync://').split('/')
ds_id = tokens[0].to_i
increments = tokens[1].split(',').map {|s| s.split(':') }
disk_path = tokens[2..].join('/')
disk_index = Pathname.new(disk_path).basename.to_s.split('.')[1]
vm_id = disk_path.match("/#{ds_id}/(\\d+)/[^/]+/[^/]+$")[1].to_i
begin
backup_ds = OpenNebula::Datastore.new_with_id ds_id, OpenNebula::Client.new
rc = backup_ds.info(true)
raise StandardError, rc.message if OpenNebula.is_error?(backup_ds)
ds_hash = backup_ds.to_hash['DATASTORE']
base_path = ds_hash['BASE_PATH']
rsync_user = ds_hash['TEMPLATE']['RSYNC_USER'] || 'oneadmin'
rsync_host = ds_hash['TEMPLATE']['RSYNC_HOST']
rsync_tmp_dir = ds_hash['TEMPLATE']['RSYNC_TMP_DIR'] || '/var/tmp'
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
# Prepare image.
begin
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
disk_paths = increments.map do |index, snap|
raw = %(#{base_path}/#{vm_id}/#{snap}/disk.#{disk_index}.#{index})
cleaned = Pathname.new(raw).cleanpath.to_s
cleaned
end
tmp_dir = "#{rsync_tmp_dir}/#{SecureRandom.uuid}"
tmp_path = "#{tmp_dir}/#{Pathname.new(disk_paths.last).basename}"
script << <<~EOS
mkdir -p '#{tmp_dir}/'
#{TransferManager::BackupImage.reconstruct_chain(disk_paths)}
qemu-img convert -O qcow2 '#{disk_paths.last}' '#{tmp_path}'
EOS
rc = TransferManager::Action.ssh 'prepare_image',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
ssh_opts = '-q -o ControlMaster=no -o ControlPath=none -o ForwardAgent=yes'
STDOUT.puts <<~EOS
command="ssh #{ssh_opts} '#{rsync_user}@#{rsync_host}' cat '#{tmp_path}'"
clean_command="ssh #{ssh_opts} '#{rsync_user}@#{rsync_host}' rm -rf '#{tmp_dir}/'"
EOS
exit(0)