mirror of https://github.com/OpenNebula/one.git synced 2025-01-22 22:03:39 +03:00

F #6029, #6074: Retention and restore options for incremental backups

* Introduce support for honoring KEEP_LAST in incremental backups.
  - New increment_flatten action added for backup datastores.
  - increment_flatten will consolidate KEEP_LAST increments into the
    current first increment in the chain.
  - increment_flatten MUST return the new chain (inc1:source1,...) and the
    size of the new first increment (FULL) in the chain.
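  - For illustration (hypothetical sources): with KEEP_LAST=2, the chain
    0:s0,1:s1,2:s2 is consolidated into 1:s1',2:s2, where s1' is the new
    first (FULL) increment that merges increments 0 and 1.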

* Downloader logic for restore has been extracted from downloader.sh so the
  increment flatten logic can be reused. A new command, restic_downloader.rb,
  processes restic:// pseudo-URLs.

* The restore process uses two new attributes for customization:
  - NAME, used as the base name for images and the VM Template
  - INCREMENT_ID, to restore disks from a given increment (not necessarily
    the last one)

* Common logic has been added to the BackupImage class (backup.rb)

* Includes the following fixes:
  - Fix handling of increments that include blocks larger than the maximum
    qemu-io request size
  - Fix the IMAGES quota counter for backup images
  - Fix handling of the NO_IP / NO_NIC attributes in rsync restores

TODO:
* Mimic increment_flatten logic and restore images on the backup server
* Sunstone restore options

Co-authored-by: Michal Opala <mopala@opennebula.io>
Ruben S. Montero 2023-02-07 10:39:48 +01:00
parent 26c76a7307
commit 8cb3a360b8
No known key found for this signature in database
GPG Key ID: A0CEA6FA880A1D87
23 changed files with 764 additions and 227 deletions

View File

@ -24,6 +24,14 @@
/**
* The Image INCREMENT attribute
*
* <INCREMENT>
* <ID> Unique ID of the increment within this backup
* <TYPE> Type of the increment: FULL | INCREMENT
* <PARENT_ID> ID of the parent increment (backing file)
* <SOURCE> Reference in the backup system
* <SIZE> Size of this increment
* <DATE> When this backup was taken (epoch)
*/
class Increment : public ExtendedAttribute
{
@ -38,6 +46,9 @@ public:
INCREMENT = 1, /** < Forward increment */
};
/* ---------------------------------------------------------------------- */
/* Functions to get/set increment attributes */
/* ---------------------------------------------------------------------- */
long long size() const
{
long long sz = 0;
@ -47,11 +58,44 @@ public:
return sz;
}
void size(const std::string& sz)
{
replace("SIZE", sz);
}
std::string source() const
{
return vector_value("SOURCE");
}
void source(const std::string& src)
{
replace("SOURCE", src);
}
int id() const
{
return get_id();
}
void parent_id(int id)
{
replace("PARENT_ID", id);
}
void backup_type(Type t)
{
switch (t)
{
case FULL:
replace("TYPE", "FULL");
break;
case INCREMENT:
replace("TYPE", "INCREMENT");
break;
}
}
Type backup_type() const
{
std::string stype = vector_value("TYPE");
@ -93,6 +137,14 @@ public:
/* ---------------------------------------------------------------------- */
/* Increment interface */
/* ---------------------------------------------------------------------- */
/**
* Creates a new increment in the set
* @param source internal representation of the increment for the backup driver
* @param sz size of the increment in MB
* @param type FULL (first increment in the chain) or INCREMENT
*
* @return Pointer to the new attribute
*/
VectorAttribute * new_increment(const std::string& source, long long sz,
Increment::Type type);
@ -101,6 +153,27 @@ public:
return static_cast<Increment *>(last_attribute());
}
Increment * get_increment(int id) const
{
return static_cast<Increment *>(get_attribute(id));
}
Increment * delete_increment(int id)
{
return static_cast<Increment *>(delete_attribute(id));
}
/**
* @return Number of increments in the chain
*/
unsigned int total()
{
return size();
}
/**
* @return Total size of the backup (adding all increments), in MB
*/
long long total_size();
/* ---------------------------------------------------------------------- */
@ -177,11 +250,30 @@ public:
int last_increment_id();
/**
* @return Total size of the backup (adding all increments), in MB
*/
long long total_size()
{
return increments.total_size();
}
/**
* Update the sources of the increments after a merge operation.
*
* @param incs chain string with format 0:source0,...,N:sourceN. The number of
* increments can be less than the current number.
* @param sz size in MB of the new first (FULL) increment
*/
int update_increments(const std::string& incs, const std::string& sz);
/**
* @return Number of increments in the chain
*/
unsigned int total()
{
return increments.total();
}
private:
/**
* Text representation of the increments

View File

@ -230,6 +230,19 @@ public:
return id;
}
int keep_last() const
{
int kl;
if (!config.get("KEEP_LAST", kl))
{
return 0;
}
return kl;
}
/* ---------------------------------------------------------------------- */
void last_backup_clear()

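The keep_last() accessor above returns 0 when the attribute is unset, which
disables retention. For illustration, KEEP_LAST would be set in the VM's
backup configuration, e.g. (hypothetical values):

    BACKUP_CONFIG = [
      MODE      = "INCREMENT",
      KEEP_LAST = "2" ]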
View File

@ -613,21 +613,26 @@ public:
/* ---------------------------------------------------------------------- */
int add_increment(const std::string& source, long long size, Increment::Type type)
{
int rc = increments.add_increment(source, size, type);
int rc = _increments.add_increment(source, size, type);
if ( rc == -1 )
{
return -1;
}
size_mb = increments.total_size();
size_mb = _increments.total_size();
return 0;
}
int last_increment_id()
{
return increments.last_increment_id();
return _increments.last_increment_id();
}
BackupIncrements& increments()
{
return _increments;
}
private:
@ -744,7 +749,7 @@ private:
* List of backup increments (only relevant for BACKUP images, of type
* incremental)
*/
BackupIncrements increments;
BackupIncrements _increments;
/**
* ID of the snapshot being processed (if any)

View File

@ -300,7 +300,7 @@ public:
int revert_snapshot(int iid, int sid, std::string& error);
/**
* Flattens ths snapshot by commiting changes to base image.
* Flattens the snapshot by committing changes to the base image.
* @param iid id of image
* @param sid id of the snapshot
* @param error_str Error reason, if any
@ -308,6 +308,16 @@ public:
*/
int flatten_snapshot(int iid, int sid, std::string& error);
/**
* Flattens the backup chain by committing changes to the first (full) backup
* @param iid id of image
* @param ds_id id of the datastore
* @param edata XML string with KEEP_LAST and VM_ID
* @param error_str Error reason, if any
* @return 0 on success
*/
int flatten_increments(int iid, int ds_id, const std::string& edata, std::string& error);
private:
/**
* Generic name for the Image driver
@ -409,6 +419,8 @@ private:
void _snap_flatten(std::unique_ptr<image_msg_t> msg);
void _increment_flatten(std::unique_ptr<image_msg_t> msg);
void _restore(std::unique_ptr<image_msg_t> msg);
static void _log(std::unique_ptr<image_msg_t> msg);

View File

@ -60,6 +60,7 @@ enum class ImageManagerMessages : unsigned short int
SNAP_REVERT,
SNAP_FLATTEN,
RESTORE,
INCREMENT_FLATTEN,
LOG,
ENUM_MAX
};

View File

@ -2149,6 +2149,7 @@ DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
src/datastore_mad/remotes/downloader.sh \
src/datastore_mad/remotes/lxd_downloader.sh \
src/datastore_mad/remotes/docker_downloader.sh \
src/datastore_mad/remotes/restic_downloader.rb \
src/datastore_mad/remotes/vcenter_uploader.rb \
src/datastore_mad/remotes/vcenter_downloader.rb \
src/datastore_mad/remotes/url.rb \

View File

@ -25,12 +25,6 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
PREFIXES = %w[http https ssh s3 rbd vcenter lxd docker dockerfile]
TEMPLATE_OPTIONS=[
{
:name => 'name',
:large => '--name name',
:format => String,
:description => 'Name of the new image'
},
{
:name => 'description',
:large => '--description description',
@ -497,6 +491,8 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
o[:name].to_sym
end
template_options << :name
template = create_image_variables(
options,
template_options - [:persistent, :dry, :prefix]

View File

@ -106,6 +106,21 @@ CommandParser::CmdParser.new(ARGV) do
:description => 'Do not keep network mappings'
}
INCREMENT = {
:name => 'increment',
:large => '--increment increment_id',
:format => Integer,
:description => 'Use the given increment ID to restore the backup.'\
' If not provided the last one will be used'
}
NAME = {
:name => 'name',
:large => '--name name',
:format => String,
:description => 'Name of the new image'
}
########################################################################
# Global Options
########################################################################
@ -118,6 +133,7 @@ CommandParser::CmdParser.new(ARGV) do
CREATE_OPTIONS = [OneDatastoreHelper::DATASTORE,
OneImageHelper::IMAGE,
NAME,
NO_CONTEXT]
########################################################################
@ -432,7 +448,7 @@ CommandParser::CmdParser.new(ARGV) do
command :restore,
restore_desc,
:imageid,
:options => [OneDatastoreHelper::DATASTORE, NO_NIC, NO_IP] do
:options => [OneDatastoreHelper::DATASTORE, NO_NIC, NO_IP, NAME, INCREMENT] do
helper.perform_action(args[0], options, 'vm backup restored') do |o|
if options[:datastore].nil?
STDERR.puts 'Datastore to restore the backup is mandatory: '
@ -444,6 +460,8 @@ CommandParser::CmdParser.new(ARGV) do
restore_opts << "NO_NIC=\"YES\"\n" if options[:no_nic]
restore_opts << "NO_IP=\"YES\"\n" if options[:no_ip]
restore_opts << "NAME=\"#{options[:name]}\"\n" if options[:name]
restore_opts << "INCREMENT_ID=\"#{options[:increment]}\"\n" if options[:increment]
rc = o.restore(options[:datastore].to_i, restore_opts)

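For illustration, with hypothetical IDs:

    oneimage restore 42 --datastore 1 --increment 3 --name alpine-restored

would restore backup image 42 into datastore 1 using the chain up to
increment 3, with 'alpine-restored' as the base name for the new images and
VM Template.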
View File

@ -53,7 +53,7 @@ end
$LOAD_PATH << RUBY_LIB_LOCATION
require "OpenNebulaDriver"
require 'OpenNebulaDriver'
require 'getoptlong'
require 'base64'
require 'rexml/document'
@ -66,28 +66,29 @@ class DatastoreDriver < OpenNebulaDriver
# Image Driver Protocol constants
ACTION = {
:cp => "CP",
:rm => "RM",
:mkfs => "MKFS",
:log => "LOG",
:stat => "STAT",
:clone => "CLONE",
:monitor => "MONITOR",
:snap_delete => "SNAP_DELETE",
:snap_revert => "SNAP_REVERT",
:snap_flatten=> "SNAP_FLATTEN",
:restore => "RESTORE"
:cp => 'CP',
:rm => 'RM',
:mkfs => 'MKFS',
:log => 'LOG',
:stat => 'STAT',
:clone => 'CLONE',
:monitor => 'MONITOR',
:snap_delete => 'SNAP_DELETE',
:snap_revert => 'SNAP_REVERT',
:snap_flatten=> 'SNAP_FLATTEN',
:restore => 'RESTORE',
:increment_flatten => 'INCREMENT_FLATTEN'
}
# Default System datastores for OpenNebula, override in oned.conf
SYSTEM_DS_TYPES = [
"shared",
"ssh",
"ceph"
'shared',
'ssh',
'ceph'
]
# Register default actions for the protocol
def initialize(ds_type, sys_ds_type, options={})
def initialize(ds_type, sys_ds_type, options = {})
@options={
:concurrency => 10,
:threaded => true,
@ -106,9 +107,9 @@ class DatastoreDriver < OpenNebulaDriver
}
}.merge!(options)
super("datastore/", @options)
super('datastore/', @options)
if ds_type == nil
if ds_type.nil?
@types = Dir["#{@local_scripts_path}/*/"].map do |d|
d.split('/')[-1]
end
@ -118,7 +119,7 @@ class DatastoreDriver < OpenNebulaDriver
@types = ds_type
end
if sys_ds_type == nil
if sys_ds_type.nil?
@sys_types = SYSTEM_DS_TYPES
elsif sys_ds_type.class == String
@sys_types = [sys_ds_type]
@ -128,16 +129,17 @@ class DatastoreDriver < OpenNebulaDriver
@local_tm_scripts_path = File.join(@local_scripts_base_path, 'tm/')
register_action(ACTION[:cp].to_sym, method("cp"))
register_action(ACTION[:rm].to_sym, method("rm"))
register_action(ACTION[:mkfs].to_sym, method("mkfs"))
register_action(ACTION[:stat].to_sym, method("stat"))
register_action(ACTION[:clone].to_sym, method("clone"))
register_action(ACTION[:monitor].to_sym, method("monitor"))
register_action(ACTION[:snap_delete].to_sym, method("snap_delete"))
register_action(ACTION[:snap_revert].to_sym, method("snap_revert"))
register_action(ACTION[:snap_flatten].to_sym, method("snap_flatten"))
register_action(ACTION[:restore].to_sym, method("restore"))
register_action(ACTION[:cp].to_sym, method('cp'))
register_action(ACTION[:rm].to_sym, method('rm'))
register_action(ACTION[:mkfs].to_sym, method('mkfs'))
register_action(ACTION[:stat].to_sym, method('stat'))
register_action(ACTION[:clone].to_sym, method('clone'))
register_action(ACTION[:monitor].to_sym, method('monitor'))
register_action(ACTION[:snap_delete].to_sym, method('snap_delete'))
register_action(ACTION[:snap_revert].to_sym, method('snap_revert'))
register_action(ACTION[:snap_flatten].to_sym, method('snap_flatten'))
register_action(ACTION[:restore].to_sym, method('restore'))
register_action(ACTION[:increment_flatten].to_sym, method('increment_flatten'))
end
############################################################################
@ -194,46 +196,52 @@ class DatastoreDriver < OpenNebulaDriver
do_image_action(id, ds, :restore, "#{drv_message} #{id}")
end
def increment_flatten(id, drv_message)
ds, _sys = get_ds_type(drv_message)
do_image_action(id, ds, :increment_flatten, "#{drv_message} #{id}")
end
private
def is_available?(ds, id, action)
if @types.include?(ds)
return true
true
else
send_message(ACTION[action], RESULT[:failure], id,
"Datastore driver '#{ds}' not available")
return false
"Datastore driver '#{ds}' not available")
false
end
end
def is_sys_available?(sys, id, action)
if @sys_types.include?(sys)
return true
true
else
send_message(ACTION[action], RESULT[:failure], id,
"System datastore driver '#{sys}' not available")
return false
"System datastore driver '#{sys}' not available")
false
end
end
def do_image_action(id, ds, action, arguments, sys='', encode64=false)
def do_image_action(id, ds, action, arguments, sys = '', encode64 = false)
if !sys.empty?
return if not is_sys_available?(sys, id, action)
return unless is_sys_available?(sys, id, action)
path = File.join(@local_tm_scripts_path, sys)
else
return if not is_available?(ds, id, action)
return unless is_available?(ds, id, action)
path = File.join(@local_scripts_path, ds)
end
cmd = File.join(path, ACTION[action].downcase)
cmd << " " << arguments
cmd << ' ' << arguments
rc = LocalCommand.run(cmd, log_method(id))
result, info = get_info_from_execution(rc)
info = Base64::encode64(info).strip.delete("\n") if encode64
info = Base64.encode64(info).strip.delete("\n") if encode64
send_message(ACTION[action], result, id, info)
end
@ -250,12 +258,13 @@ class DatastoreDriver < OpenNebulaDriver
dsxml = xml_doc.root.elements['/DS_DRIVER_ACTION_DATA/DATASTORE/TYPE']
if dsxml && dsxml.text == '1'
dsxml = xml_doc.root.elements['/DS_DRIVER_ACTION_DATA/DATASTORE/TM_MAD']
dssys = dsxml.text if dsxml
dsxml = xml_doc.root.elements['/DS_DRIVER_ACTION_DATA/DATASTORE/TM_MAD']
dssys = dsxml.text if dsxml
end
return dstxt, dssys
[dstxt, dssys]
end
end
################################################################################
@ -265,10 +274,10 @@ end
################################################################################
opts = GetoptLong.new(
[ '--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--ds-types', '-d', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--system-ds-types', '-s', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--timeout', '-w', GetoptLong::OPTIONAL_ARGUMENT ]
['--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT],
['--ds-types', '-d', GetoptLong::OPTIONAL_ARGUMENT],
['--system-ds-types', '-s', GetoptLong::OPTIONAL_ARGUMENT],
['--timeout', '-w', GetoptLong::OPTIONAL_ARGUMENT]
)
ds_type = nil
@ -279,17 +288,17 @@ timeout = nil
begin
opts.each do |opt, arg|
case opt
when '--threads'
threads = arg.to_i
when '--ds-types'
ds_type = arg.split(',').map {|a| a.strip }
when '--system-ds-types'
sys_ds_type = arg.split(',').map {|a| a.strip }
when '--timeout'
timeout = arg.to_i
when '--threads'
threads = arg.to_i
when '--ds-types'
ds_type = arg.split(',').map {|a| a.strip }
when '--system-ds-types'
sys_ds_type = arg.split(',').map {|a| a.strip }
when '--timeout'
timeout = arg.to_i
end
end
rescue Exception => e
rescue StandardError
exit(-1)
end

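A minimal sketch of the driver-side contract (hypothetical skeleton, not the
actual EE implementation): the new action resolves to
<remotes>/datastore/<ds>/increment_flatten, invoked with the Base64-encoded
driver message plus the image ID, and its stdout must carry the size of the
new first (FULL) increment followed by the new chain:

    #!/usr/bin/env ruby
    # Hypothetical increment_flatten skeleton; the consolidation logic is
    # backend specific (e.g. restic, rsync).
    require 'base64'

    _xml = Base64.decode64(ARGV[0] || '') # IMAGE/DATASTORE/EXTRA_DATA payload
    _id  = ARGV[1]                        # image ID

    # ... merge the first increments into a new FULL increment here ...

    new_size_mb = 4096        # hypothetical size of the new FULL increment
    new_chain   = '1:s1,2:s2' # hypothetical id:source chain

    puts "#{new_size_mb} #{new_chain}"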
View File

@ -162,80 +162,6 @@ function s3_env
CURRENT_DATE_ISO8601="${CURRENT_DATE_DAY}T$(date -u '+%H%M%S')Z"
}
# Get restic repo information from datastore template
# It generates the repo url as: sftp:SFTP_USER@SFTP_SERVER:DATASTORE_PATH
# Sets the following environment variables
# - RESTIC_REPOSITORY (replaces -r in restic command)
# - RESTIC_PASSWORD (password to access the repo)
function restic_env
{
XPATH="$DRIVER_PATH/xpath.rb --stdin"
unset i j XPATH_ELEMENTS
while IFS= read -r -d '' element; do
XPATH_ELEMENTS[i++]="$element"
done < <(onedatastore show -x --decrypt $1 | $XPATH \
/DATASTORE/TEMPLATE/RESTIC_SFTP_SERVER \
/DATASTORE/TEMPLATE/RESTIC_SFTP_USER \
/DATASTORE/BASE_PATH \
/DATASTORE/TEMPLATE/RESTIC_PASSWORD \
/DATASTORE/TEMPLATE/RESTIC_IONICE \
/DATASTORE/TEMPLATE/RESTIC_NICE \
/DATASTORE/TEMPLATE/RESTIC_BWLIMIT \
/DATASTORE/TEMPLATE/RESTIC_CONNECTIONS \
/DATASTORE/TEMPLATE/RESTIC_TMP_DIR)
SFTP_SERVER="${XPATH_ELEMENTS[j++]}"
SFTP_USER="${XPATH_ELEMENTS[j++]:-oneadmin}"
BASE_PATH="${XPATH_ELEMENTS[j++]}"
PASSWORD="${XPATH_ELEMENTS[j++]}"
IONICE="${XPATH_ELEMENTS[j++]}"
NICE="${XPATH_ELEMENTS[j++]}"
BWLIMIT="${XPATH_ELEMENTS[j++]}"
CONNECTIONS="${XPATH_ELEMENTS[j++]}"
TMP_DIR="${XPATH_ELEMENTS[j++]}"
export RESTIC_REPOSITORY="sftp:${SFTP_USER}@${SFTP_SERVER}:${BASE_PATH}"
export RESTIC_PASSWORD="${PASSWORD}"
RESTIC_ONE_PRECMD=""
if [ -n "${NICE}" ]; then
RESTIC_ONE_PRECMD="nice -n ${NICE} "
fi
if [ -z "${TMP_DIR}" ]; then
TMP_DIR="/var/tmp/"
fi
export RESTIC_TMP_DIR="${TMP_DIR}/`uuidgen`"
if [ -n "${IONICE}" ]; then
RESTIC_ONE_PRECMD="${RESTIC_ONE_PRECMD}ionice -c2 -n ${IONICE} "
fi
if [ -x "/var/lib/one/remotes/datastore/restic/restic" ]; then
RESTIC_ONE_PATH="/var/lib/one/remotes/datastore/restic/restic"
elif [ -x "/var/tmp/one/datastore/restic/restic" ]; then
RESTIC_ONE_PATH="/var/tmp/one/datastore/restic/restic"
else
RESTIC_ONE_PATH="restic"
fi
RESTIC_ONE_CMD="${RESTIC_ONE_PRECMD}${RESTIC_ONE_PATH}"
if [ -n "${BWLIMIT}" ]; then
RESTIC_ONE_CMD="${RESTIC_ONE_CMD} --limit-upload ${BWLIMIT} --limit-download ${BWLIMIT}"
fi
if [ -n "${CONNECTIONS}" ]; then
RESTIC_ONE_CMD="${RESTIC_ONE_CMD} --option sftp.connections=${CONNECTIONS}"
fi
export RESTIC_ONE_CMD
}
# Get rsync repo information from DS template
# Sets the following variables:
# - RSYNC_CMD = rsync -a user@host:/base/path
@ -551,50 +477,7 @@ docker://*|dockerfile://*)
command="$VAR_LOCATION/remotes/datastore/docker_downloader.sh \"$FROM\""
;;
restic://*)
#pseudo restic url restic://<datastore_id>/<id>:<snapshot_id>,.../<file_name>
restic_path=${FROM#restic://}
d_id=`echo ${restic_path} | cut -d'/' -f1`
s_id=`echo ${restic_path} | cut -d'/' -f2`
file=`echo ${restic_path} | cut -d'/' -f3-`
restic_env $d_id
if [ -z "$RESTIC_REPOSITORY" -o -z "$RESTIC_PASSWORD" ]; then
echo "RESTIC_REPOSITORY and RESTIC_PASSWORD are required" >&2
exit -1
fi
incs=(${s_id//,/ })
mkdir -p ${RESTIC_TMP_DIR}
pushd ${RESTIC_TMP_DIR}
for i in "${incs[@]}"; do
inc_id=`echo $i | cut -d':' -f1`
snap_id=`echo $i | cut -d':' -f2`
${RESTIC_ONE_CMD} dump -q ${snap_id} /${file}.${inc_id} > disk.${inc_id}
done
for i in `ls disk* | sort -r`; do
id=`echo $i | cut -d'.' -f2`
pid=$((id - 1))
if [ -f "disk.${pid}" ]; then
qemu-img rebase -u -F qcow2 -b "disk.${pid}" "disk.${id}"
else
qemu-img rebase -u -b '' "disk.${id}"
fi
done
qemu-img convert -O qcow2 -m 4 `ls disk* | sort -r | head -1` disk.qcow2
command="cat `realpath disk.qcow2`"
clean_command="rm -rf ${RESTIC_TMP_DIR}"
popd
eval `$VAR_LOCATION/remotes/datastore/restic_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
;;
rsync://*)
# example: rsync://100/0:8a3454,1:f6e63e//var/lib/one//datastores/100/6/8a3454/disk.0.0

View File

@ -0,0 +1,118 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
VAR_LOCATION ||= '/var/lib/one'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
VAR_LOCATION ||= ONE_LOCATION + '/var'
end
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << File.dirname(__FILE__)
require 'fileutils'
require 'opennebula'
require 'securerandom'
require 'CommandManager'
require_relative '../tm/lib/backup'
# restic://<datastore_id>/<id>:<snapshot_id>,.../<file_name>
restic_url = ARGV[0]
tokens = restic_url.delete_prefix('restic://').split('/')
ds_id = tokens[0].to_i
snaps = tokens[1].split(',').map {|s| s.split(':')[1] }
disk_path = tokens[2..].join('/')
disk_index = File.basename(disk_path).split('.')[1]
begin
# Do a sanity check if Restic is available/enabled.
raise StandardError, 'Restic unavailable, please use OpenNebula EE.' \
unless File.exist?("#{VAR_LOCATION}/remotes/datastore/restic/")
require "#{VAR_LOCATION}/remotes/datastore/restic/restic"
# Fetch datastore XML payload directly from the API.
backup_ds = OpenNebula::Datastore.new_with_id ds_id, OpenNebula::Client.new
rc = backup_ds.info(true)
raise StandardError, rc.message \
if OpenNebula.is_error?(rc)
# Pull from Restic, then post-process qcow2 disks.
rds = Restic.new backup_ds.to_xml
tmp_dir = "#{rds.tmp_dir}/#{SecureRandom.uuid}"
FileUtils.mkdir_p tmp_dir
paths = rds.pull_chain(snaps, disk_index, nil, tmp_dir)
disk_paths = paths[:disks][:by_index][disk_index]
rc = LocalCommand.run '/bin/bash -s', nil, <<~EOS
set -e -o pipefail
#{TransferManager::BackupImage.reconstruct_chain(disk_paths, tmp_dir)}
#{TransferManager::BackupImage.merge_chain(disk_paths, tmp_dir)}
mv '#{tmp_dir}/#{File.basename(disk_paths.last)}' '#{tmp_dir}/disk.#{disk_index}'
EOS
raise StandardError, rc.stderr \
if rc.code != 0
# Return shell code snippets according to the downloader's interface.
STDOUT.puts <<~EOS
command="cat '#{tmp_dir}/disk.#{disk_index}'"
clean_command="rm -rf '#{tmp_dir}/'"
EOS
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
exit(0)
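
Tracing the parsing above with a hypothetical pseudo-URL:

    # restic://100/0:ab12,1:cd34/var/lib/one/datastores/100/53/disk.0.0
    # ds_id      => 100
    # snaps      => ['ab12', 'cd34']   (increment IDs are dropped)
    # disk_path  => 'var/lib/one/datastores/100/53/disk.0.0'
    # disk_index => '0'                (from 'disk.<index>.<increment>')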

View File

@ -82,8 +82,10 @@ begin
iid = rds.elements['IMAGE/ID'].text.to_i
dsid = rds.elements['DATASTORE/ID'].text.to_i
base = rds.elements['DATASTORE/BASE_PATH'].text
rsync_host = rds.elements['DATASTORE/TEMPLATE/RSYNC_HOST'].text
rsync_user = rds.elements['DATASTORE/TEMPLATE/RSYNC_USER'].text
vm_id = rds.elements['IMAGE/VMS[1]/ID'].text.to_i
image = TransferManager::BackupImage.new(drv_action)
rescue StandardError => e
@ -139,18 +141,6 @@ vm_xml = rc.stdout
# ------------------------------------------------------------------------------
# Prepare an OpenNebula client to impersonate the target user
# ------------------------------------------------------------------------------
no_ip = begin
rds['TEMPLATE/NO_IP'] == 'YES'
rescue StandardError
false
end
no_nic = begin
rds['TEMPLATE/NO_NIC'] == 'YES'
rescue StandardError
false
end
ENV['ONE_CIPHER_AUTH'] = SERVERADMIN_AUTH
sauth = OpenNebula::ServerCipherAuth.new_client
@ -160,15 +150,17 @@ one_client = OpenNebula::Client.new(token)
# ------------------------------------------------------------------------------
# Create backup object templates for VM and associated disk images
# Monkey patch REXML::DOCUMENT to respond to []
# ------------------------------------------------------------------------------
rds.define_singleton_method('[]'){|xpath| self.elements[xpath].text }
restorer = TransferManager::BackupRestore.new(
:vm_xml64 => vm_xml,
:backup_id => buid,
:chain => image.chain,
:bimage => image,
:ds_id => dsid,
:image_id => iid,
:no_ip => no_ip,
:no_nic => no_nic,
:txml => rds,
:proto => 'rsync'
)

View File

@ -128,3 +128,66 @@ long long IncrementSet::total_size()
return sz;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int BackupIncrements::update_increments(const std::string& incs, const std::string& sz)
{
int first = -1;
Increment * inc;
std::vector<std::string> id_sources = one_util::split(incs, ',');
for (const std::string& is: id_sources)
{
std::vector<std::string> parts = one_util::split(is, ':');
if (parts.size() != 2)
{
return -1;
}
int id;
if (!one_util::str_cast(parts[0], id))
{
return -1;
}
Increment * inc = increments.get_increment(id);
if ( inc == nullptr )
{
return -1;
}
if ( first == -1 )
{
first = id;
inc->backup_type(Increment::FULL);
inc->parent_id(-1);
inc->size(sz);
}
inc->source(parts[1]);
}
first = first - 1;
while (first >= 0 && (inc = increments.delete_increment(first)) != nullptr)
{
delete _template.remove(inc->vector_attribute());
delete inc;
first = first - 1;
}
return 0;
}
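
For example, if the image holds increments 0..3 and the driver reports the
chain '2:s2b,3:s3' with size '4096', increment 2 becomes the new first
increment (TYPE FULL, PARENT_ID -1, SIZE 4096, SOURCE s2b), increment 3 keeps
source s3, and increments 1 and 0 are deleted from both the set and the
template.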

View File

@ -432,7 +432,7 @@ string& Image::to_xml(string& xml) const
app_clone_collection.to_xml(app_clone_collection_xml) <<
obj_template->to_xml(template_xml) <<
snapshots.to_xml(snapshots_xml) <<
increments.to_xml(increments_xml) <<
_increments.to_xml(increments_xml) <<
"</IMAGE>";
xml = oss.str();
@ -528,7 +528,7 @@ int Image::from_xml(const string& xml)
if (!content.empty())
{
rc += increments.from_xml_node(content[0]);
rc += _increments.from_xml_node(content[0]);
ObjectXML::free_nodes(content);
content.clear();

View File

@ -99,6 +99,9 @@ int ImageManager::start()
register_action(ImageManagerMessages::RESTORE,
bind(&ImageManager::_restore, this, _1));
register_action(ImageManagerMessages::INCREMENT_FLATTEN,
bind(&ImageManager::_increment_flatten, this, _1));
register_action(ImageManagerMessages::LOG,
&ImageManager::_log);

View File

@ -1387,6 +1387,69 @@ int ImageManager::flatten_snapshot(int iid, int sid, string& error)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ImageManager::flatten_increments(int iid, int ds_id, const string& edata, string& error)
{
const auto* imd = get();
if ( imd == nullptr )
{
error = "Could not get datastore driver";
return -1;
}
string ds_data;
if (auto ds = dspool->get_ro(ds_id))
{
ds->decrypt();
ds->to_xml(ds_data);
}
else
{
error = "Datastore no longer exists";
return -1;
}
auto img = ipool->get(iid);
if ( img == nullptr )
{
error = "Image does not exist";
return -1;
}
if ( img->get_type() != Image::BACKUP )
{
error = "Image is not of type BACKUP";
return -1;
}
if (img->get_state() != Image::READY)
{
error = "Cannot flatten increments in state " + Image::state_to_str(img->get_state());
return -1;
}
/* ---------------------------------------------------------------------- */
/* Format message and send action to driver */
/* ---------------------------------------------------------------------- */
string img_tmpl;
string drv_msg(format_message(img->to_xml(img_tmpl), ds_data, edata));
image_msg_t msg(ImageManagerMessages::INCREMENT_FLATTEN, "", iid, drv_msg);
imd->write(msg);
img->set_state(Image::LOCKED);
ipool->update(img.get());
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ImageManager::restore_image(int iid, int dst_ds_id, const std::string& txml,
std::string& result)
{

View File

@ -864,6 +864,91 @@ void ImageManager::_restore(unique_ptr<image_msg_t> msg)
/* -------------------------------------------------------------------------- */
void ImageManager::_increment_flatten(unique_ptr<image_msg_t> msg)
{
NebulaLog::dddebug("ImM", "_increment_flatten: " + msg->payload());
auto image = ipool->get(msg->oid());
if ( !image )
{
return;
}
bool error = false;
long long size_orig;
long long size_new;
int ds_id = image->get_ds_id();
int uid = image->get_uid();
int gid = image->get_gid();
if (msg->status() == "SUCCESS")
{
auto& increments = image->increments();
size_orig = image->get_size();
string size_mb;
string chain;
istringstream is(msg->payload());
is >> size_mb;
is >> chain;
int rc = increments.update_increments(chain, size_mb);
size_new = increments.total_size();
image->set_size(size_new);
error = rc == -1;
}
else
{
error = true;
}
if (error)
{
ostringstream oss;
oss << "Error flattening backup increments";
const auto& info = msg->payload();
if (!info.empty() && (info[0] != '-'))
{
oss << ": " << info;
}
image->set_template_error_message(oss.str());
NebulaLog::log("ImM", Log::ERROR, oss);
}
image->set_state_unlock();
ipool->update(image.get());
image.reset();
if (msg->status() == "SUCCESS")
{
Template quotas;
quotas.add("DATASTORE", ds_id);
quotas.add("SIZE", size_orig - size_new);
quotas.add("IMAGES", 0);
Quotas::ds_del(uid, gid, &quotas);
}
}
/* -------------------------------------------------------------------------- */
void ImageManager::_log(unique_ptr<image_msg_t> msg)
{
NebulaLog::log("ImM", log_type(msg->status()[0]), msg->payload());

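Note the sign of the quota update above: only the space reclaimed by the
flatten is returned. With hypothetical sizes, a chain shrinking from
size_orig = 10240 MB to size_new = 8192 MB releases SIZE = 2048 MB from the
owner's datastore quota, with IMAGES = 0 since no image is removed.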
View File

@ -2765,6 +2765,7 @@ void LifeCycleManager::trigger_backup_success(int vid)
int ds_id = backups.last_datastore_id();
int incremental_id = backups.incremental_backup_id();
int keep_last = backups.keep_last();
Backups::Mode mode = backups.mode();
long long reserved_sz = vm->backup_size(ds_deltas);
@ -2774,11 +2775,6 @@ void LifeCycleManager::trigger_backup_success(int vid)
ds_deltas.add("DATASTORE", backups.last_datastore_id());
if (mode == Backups::FULL || incremental_id == -1)
{
ds_deltas.add("IMAGES", 1);
}
switch(vm->get_lcm_state())
{
case VirtualMachine::BACKUP:
@ -2794,6 +2790,11 @@ void LifeCycleManager::trigger_backup_success(int vid)
vm->log("LCM", Log::ERROR, "backup_success, VM in wrong state");
vm.reset();
if (mode == Backups::FULL || incremental_id == -1)
{
ds_deltas.add("IMAGES", 1);
}
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
return;
}
@ -2822,6 +2823,8 @@ void LifeCycleManager::trigger_backup_success(int vid)
/* ------------------------------------------------------------------ */
/* Update backup information for increments */
/* ------------------------------------------------------------------ */
int increments = -1;
if (mode == Backups::INCREMENT)
{
Increment::Type itype;
@ -2849,6 +2852,9 @@ void LifeCycleManager::trigger_backup_success(int vid)
backups.last_increment_id(image->last_increment_id());
increments = image->increments().total();
ds_id = image->get_ds_id();
ipool->update(image.get());
}
else
@ -2862,7 +2868,7 @@ void LifeCycleManager::trigger_backup_success(int vid)
vmpool->update(vm.get());
if ( delete_ids.size() > 0 )
if (delete_ids.size() > 0) // FULL & backups > keep_last
{
ostringstream oss;
@ -2875,6 +2881,14 @@ void LifeCycleManager::trigger_backup_success(int vid)
vm->log("LCM", Log::INFO, oss.str());
}
else if (keep_last > 0 && increments > keep_last) // INCREMENTAL & increments > keep_last
{
ostringstream oss;
oss << "Removing " << increments - keep_last << " backup increments";
vm->log("LCM", Log::INFO, oss.str());
}
vm.reset();
@ -2903,6 +2917,25 @@ void LifeCycleManager::trigger_backup_success(int vid)
}
}
}
else if (mode == Backups::INCREMENT && keep_last > 0 && increments > keep_last)
{
ostringstream oss;
oss << "<EXTRA_DATA>"
<< "<KEEP_LAST>" << keep_last << "</KEEP_LAST>"
<< "<VM_ID>" << vid << "</VM_ID>"
<< "</EXTRA_DATA>";
if ( imagem->flatten_increments(image_id, ds_id, oss.str(), error) != 0 )
{
ostringstream oss;
oss << "backup_success, cannot flatten backup increments for image "
<< image_id << " : " << error;
NebulaLog::error("LCM", oss.str());
}
}
/* ------------------------------------------------------------------ */
/* Update quotas, count real size of the backup */

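Putting the trigger together with hypothetical values: with KEEP_LAST = 2,
the backup that creates increment 2 raises the total to 3 > keep_last, so
flatten_increments is called with the EXTRA_DATA document above, and the
driver is expected to reply with the flattened chain (e.g. 1:s1b,2:s2) and
the size of its new first increment.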
View File

@ -51,7 +51,18 @@ class HostSyncManager
@remote_scripts_base_path&.delete!("'")
end
def update_remotes(hostname, logger = nil, copy_method = :rsync)
def update_remotes(hostname, logger = nil, copy_method = :rsync, subset = nil)
sources = '.'
if subset && copy_method == :rsync
# Make sure all files in the subset exist (and are relative).
subset.each do |path|
File.realpath path, @local_scripts_base_path
end
sources = subset.join(' ')
end
assemble_cmd = lambda do |steps|
"exec 2>/dev/null; #{steps.join(' && ')}"
end
@ -64,7 +75,8 @@ class HostSyncManager
]
sync_cmd = assemble_cmd.call [
"scp -rp '#{@local_scripts_base_path}'/* " \
"cd '#{@local_scripts_base_path}'/",
"scp -rp #{sources} " \
"'#{hostname}':'#{@remote_scripts_base_path}'/"
]
when :rsync
@ -73,7 +85,8 @@ class HostSyncManager
]
sync_cmd = assemble_cmd.call [
"rsync -Laz --delete '#{@local_scripts_base_path}'/ " \
"cd '#{@local_scripts_base_path}'/",
"rsync -LRaz --delete #{sources} " \
"'#{hostname}':'#{@remote_scripts_base_path}'/"
]
end

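A usage sketch of the new subset parameter (hypothetical host and path). With
:rsync the entries are passed as relative paths, so rsync -R recreates them
under the remote scripts directory:

    sync = HostSyncManager.new
    # Push only the restic datastore scripts instead of the whole tree.
    sync.update_remotes('node01', nil, :rsync, ['datastore/restic'])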
View File

@ -63,6 +63,7 @@ const EString<ImageManagerMessages> image_msg_t::_type_str({
{"SNAP_DELETE", ImageManagerMessages::SNAP_DELETE},
{"SNAP_REVERT", ImageManagerMessages::SNAP_REVERT},
{"SNAP_FLATTEN", ImageManagerMessages::SNAP_FLATTEN},
{"INCREMENT_FLATTEN", ImageManagerMessages::INCREMENT_FLATTEN},
{"RESTORE", ImageManagerMessages::RESTORE},
{"LOG", ImageManagerMessages::LOG},
});

View File

@ -4049,6 +4049,11 @@ void VirtualMachineBackup::request_execute(
{
quota_tmpl.add("IMAGES", 1);
}
else
{
quota_tmpl.add("IMAGES", 0);
}
RequestAttributes att_quota(vm_perms.uid, vm_perms.gid, att);

View File

@ -23,6 +23,47 @@ module TransferManager
# This class includes methods to manage backup images
class BackupImage
attr_reader :vm_id, :keep_last
# Given a sorted list of qcow2 files,
# return a shell recipe that reconstructs the backing chain in-place.
def self.reconstruct_chain(paths, workdir = nil)
return '' unless paths.size > 1
lhs = paths.last(paths.size - 1)
rhs = paths.first(paths.size - 1)
script = []
lhs.zip(rhs).each do |target, backing|
backing = "#{workdir || File.dirname(backing)}/#{File.basename(backing)}"
target = "#{workdir || File.dirname(target)}/#{File.basename(target)}"
script << "qemu-img rebase -u -F qcow2 -b '#{backing}' '#{target}'"
end
script.join("\n")
end
# Given a sorted list of qcow2 files with backing chain properly reconstructed,
# return a shell recipe that merges it into a single qcow2 image.
def self.merge_chain(paths, workdir = nil)
return '' unless paths.size > 1
clean = paths.first(paths.size - 1)
origfile = "#{workdir || File.dirname(paths.last)}/#{File.basename(paths.last)}"
workfile = "#{origfile}.tmp"
script = []
script << "qemu-img convert -O qcow2 '#{origfile}' '#{workfile}'"
script << "mv '#{workfile}' '#{origfile}'"
script << "rm -f #{clean.map {|p| "'#{p}'" }.join(' ')}" unless clean.empty?
script.join("\n")
end
def initialize(action_xml)
@action = REXML::Document.new(action_xml).root
@increments = {}
@ -36,6 +77,9 @@ module TransferManager
end
@increments[0] = @action.elements["#{prefix}/SOURCE"].text if @increments.empty?
@keep_last = @action.elements['/DS_DRIVER_ACTION_DATA/EXTRA_DATA/KEEP_LAST']&.text.to_i
@vm_id = @action.elements['/DS_DRIVER_ACTION_DATA/EXTRA_DATA/VM_ID']&.text.to_i
end
def last
@ -50,6 +94,29 @@ module TransferManager
@increments.map {|k, v| "#{k}:#{v}" }.join(',')
end
# Create the chain with the last N elements
def chain_last(n)
@increments.map {|k, v| "#{k}:#{v}" }.last(n).join(',')
end
# Create the chain with the first N elements
def chain_first(n)
@increments.map {|k, v| "#{k}:#{v}" }.first(n).join(',')
end
# Create the chain up to a given increment id
def chain_up_to(id)
@increments.map {|k, v| "#{k}:#{v}" if k <= id }.compact.join(',')
end
# Create the chain with the last KEEP_LAST increments, using snap_id as
# the source of the new first (flattened) increment
def chain_keep_last(snap_id)
chain_a = @increments.map {|k, v| "#{k}:#{v}" }.last(@keep_last)
chain_a[0] = "#{@increments.keys[-@keep_last]}:#{snap_id}"
chain_a.join(',')
end
end
# This class includes methods to generate a recovery VM template based
@ -95,25 +162,46 @@ module TransferManager
# :backup_id => Internal ID used by the backup system
# :ds_id => Datastore to create the images
# :proto => Backup protocol
# :no_ip => Do not preserve NIC addresses
# :no_nic => Do not preserve network maps
# :txml => Object that responds to [] to get TEMPLATE attributes
# }
def initialize(opts = {})
txt = Base64.decode64(opts[:vm_xml64])
xml = OpenNebula::XMLElement.build_xml(txt, 'VM')
@vm = OpenNebula::VirtualMachine.new(xml, nil)
@base_name = "#{@vm.id}-#{opts[:backup_id]}"
@base_url = "#{opts[:proto]}://#{opts[:ds_id]}/#{opts[:chain]}"
@ds_id = opts[:ds_id]
if opts[:no_ip]
@base_name = begin
opts[:txml]['TEMPLATE/NAME']
rescue StandardError
"#{@vm.id}-#{opts[:backup_id]}"
end
no_ip = begin
opts[:txml]['TEMPLATE/NO_IP'].casecmp?('YES')
rescue StandardError
false
end
@no_nic = begin
opts[:txml]['TEMPLATE/NO_NIC'].casecmp?('YES')
rescue StandardError
false
end
chain = begin
inc_id = Integer(opts[:txml]['TEMPLATE/INCREMENT_ID'])
opts[:bimage].chain_up_to(inc_id)
rescue StandardError
opts[:bimage].chain
end
@base_url = "#{opts[:proto]}://#{opts[:ds_id]}/#{chain}"
if no_ip
NIC_LIST << %w[IP IP6 IP6_ULA IP6_GLOBAL]
NIC_LIST.flatten!
end
@no_nic = opts[:no_nic]
end
# Creates Image templates for the backup disks.

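A usage sketch for the BackupImage chain helpers above, with hypothetical
increment files sorted oldest first; the comments show the generated recipes:

    paths = ['/backup/disk.0', '/backup/disk.1', '/backup/disk.2']

    puts TransferManager::BackupImage.reconstruct_chain(paths)
    # qemu-img rebase -u -F qcow2 -b '/backup/disk.0' '/backup/disk.1'
    # qemu-img rebase -u -F qcow2 -b '/backup/disk.1' '/backup/disk.2'

    puts TransferManager::BackupImage.merge_chain(paths)
    # qemu-img convert -O qcow2 '/backup/disk.2' '/backup/disk.2.tmp'
    # mv '/backup/disk.2.tmp' '/backup/disk.2'
    # rm -f '/backup/disk.0' '/backup/disk.1'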
View File

@ -34,12 +34,16 @@ require_relative './kvm'
#
# IO_ASYNC: if true issues aio_read commands instead of read
# OUTSTAND_OPS: number of aio_reads before issuing an aio_flush command
#
# BDRV_MAX_REQUEST is the limit for the size of qemu-io operations
#-------------------------------------------------------------------------------
LOG_FILE = nil
QEMU_IO_OPEN = '-t none -i native -o driver=qcow2'
IO_ASYNC = false
OUTSTAND_OPS = 8
BDRV_MAX_REQUEST = 2**30
# rubocop:disable Style/ClassVars
#---------------------------------------------------------------------------
@ -50,7 +54,7 @@ module Command
def log(message)
return unless LOG_FILE
File.write(LOG_FILE, "#{Time.now.strftime('%H:%M:%S.%L')} #{message}\n", { :mode => 'a' })
File.write(LOG_FILE, "#{Time.now.strftime('%H:%M:%S.%L')} #{message}\n", mode: 'a')
end
def cmd(command, args, opts = {})
@ -196,13 +200,52 @@ class QemuImg
# Create a qemu-io script to pull changes
# ----------------------------------------------------------------------
io_script = "open -C #{QEMU_IO_OPEN} #{@path}\n"
index = -1
exts.each_with_index do |e, i|
if IO_ASYNC
io_script << "aio_read -q #{e['offset']} #{e['length']}\n"
io_script << "aio_flush\n" if (i+1)%OUTSTAND_OPS == 0
exts.each do |e|
ext_length = Integer(e['length'])
new_exts = []
if ext_length > BDRV_MAX_REQUEST
ext_offset = Integer(e['offset'])
loop do
index += 1
blk_length = if ext_length > BDRV_MAX_REQUEST
BDRV_MAX_REQUEST
else
ext_length
end
new_exts << {
'offset' => ext_offset,
'length' => blk_length,
'index' => index
}
ext_offset += BDRV_MAX_REQUEST
ext_length -= BDRV_MAX_REQUEST
break if ext_length <= 0
end
else
io_script << "read -q #{e['offset']} #{e['length']}\n"
index += 1
new_exts << {
'offset' => e['offset'],
'length' => e['length'],
'index' => index
}
end
new_exts.each do |i|
if IO_ASYNC
io_script << "aio_read -q #{i['offset']} #{i['length']}\n"
io_script << "aio_flush\n" if (i['index']+1)%OUTSTAND_OPS == 0
else
io_script << "read -q #{i['offset']} #{i['length']}\n"
end
end
end
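
For example, a dirty extent at offset 0 with length 3 GiB exceeds
BDRV_MAX_REQUEST (2^30 bytes) and is split into three 1 GiB operations:

    read -q 0 1073741824
    read -q 1073741824 1073741824
    read -q 2147483648 1073741824

With IO_ASYNC enabled the same offsets are issued as aio_read commands, with
an aio_flush after every OUTSTAND_OPS (8) requests.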