Implementation overview:
- Incremental points are saved as dedicated rbd snapshots under the "one_backup_<increment_id>" namespace. These snapshots are used to generate delta files in rbdiff format.
- The rbdiff files are stored on the backup server and are used to restore the rbd volumes.
- The restore process is performed directly on the Ceph cluster: the base image (the first full backup in the chain) is imported with rbd import, and the increments are then applied with rbd import-diff up to the target increment.
- Two new pseudo-protocols have been implemented to support this restore pattern (restic+rbd, rsync+rbd). These protocols bundle the rbdiff files in a tarball for transfer from the backup server.

Note: the reconstruct process uses the Ceph BRIDGE_LIST and not the backup server (as opposed to qcow2 backups).

Other bug fixes:
- This commit also fixes #6741, resetting the backup chain after a restore.
- The original Ceph drivers did not receive the full action information; this is now fixed by including the VM information in the STDIN string sent to the driver.

Compatibility note:
- Backup actions should now return the backup format used (raw, rbd, ...) as a third argument. If it is not provided, oned (6.10.x) defaults to raw to accommodate third-party driver implementations. Including this third argument is recommended.

Signed-off-by: Guillermo Ramos <gramos@opennebula.io>
Co-authored-by: Guillermo Ramos <gramos@opennebula.io>
(cherry picked from commit 5f7b370c5274434804726c3feb435c98af761963)
This commit is contained in:
parent d570f6bdf3
commit 7f67cc681a
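The restore pattern described above is easiest to see as the handful of rbd calls it reduces to. A minimal sketch, assuming the rbdiff bundle has already been unpacked into the current directory and that RBD and RBD_SOURCE are set the way the driver scripts set them (disk index and increment count illustrative):

    # Import the base image (first full backup in the chain)
    $RBD import --export-format 2 - $RBD_SOURCE < disk.0.rbd2

    # Apply the increments in order, up to the target increment
    for f in $(ls disk.0.*.rbdiff | sort -t. -k3 -n); do
        $RBD import-diff - $RBD_SOURCE < "$f"
    done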
@@ -175,6 +175,11 @@ public:
config.replace("LAST_BACKUP_SIZE", size);
}

void last_backup_format(const std::string& format)
{
config.replace("LAST_BACKUP_FORMAT", format);
}

void last_increment_id(int id)
{
config.replace("LAST_INCREMENT_ID", id);
@@ -224,6 +229,15 @@ public:
return sz;
}

std::string last_backup_format() const
{
std::string fmt;

config.get("LAST_BACKUP_FORMAT", fmt);

return fmt;
}

int last_increment_id() const
{
int id;
@@ -283,6 +297,8 @@ public:

config.erase("LAST_BACKUP_ID");
config.erase("LAST_BACKUP_SIZE");

config.erase("LAST_BACKUP_FORMAT");
}

/**
@@ -61,7 +61,6 @@ done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
/DS_DRIVER_ACTION_DATA/IMAGE/SIZE \
/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5 \
/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/SHA1 \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/NO_DECOMPRESS \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/LIMIT_TRANSFER_BW \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_USER \
/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/CEPH_KEY \
@@ -81,7 +80,6 @@ SRC="${XPATH_ELEMENTS[i++]}"
SIZE="${XPATH_ELEMENTS[i++]}"
MD5="${XPATH_ELEMENTS[i++]}"
SHA1="${XPATH_ELEMENTS[i++]}"
NO_DECOMPRESS="${XPATH_ELEMENTS[i++]}"
LIMIT_TRANSFER_BW="${XPATH_ELEMENTS[i++]}"
CEPH_USER="${XPATH_ELEMENTS[i++]}"
CEPH_KEY="${XPATH_ELEMENTS[i++]}"
@@ -119,7 +117,7 @@ TMP_DST="$STAGING_DIR/$IMAGE_HASH"
IMAGE_NAME="one-${ID}"
RBD_SOURCE="${POOL_NAME}/${IMAGE_NAME}"

DOWNLOADER_ARGS=`set_downloader_args "$MD5" "$SHA1" "$NO_DECOMPRESS" "$LIMIT_TRANSFER_BW" "$SRC" -`
DOWNLOADER_ARGS=`set_downloader_args "$MD5" "$SHA1" "yes" "$LIMIT_TRANSFER_BW" "$SRC" -`

COPY_COMMAND="$UTILS_PATH/downloader.sh $DOWNLOADER_ARGS"

@@ -153,21 +151,43 @@ fi
REGISTER_CMD=$(cat <<EOF
set -e -o pipefail

FORMAT=\$($QEMU_IMG info $TMP_DST | grep "^file format:" | awk '{print \$3}' || :)
if file $TMP_DST | grep -q gzip; then
mkdir $TMP_DST.d
cd $TMP_DST.d

if [ "\$FORMAT" != "raw" ] && [ "\$FORMAT" != "luks" ]; then
$QEMU_IMG convert -O raw $TMP_DST $TMP_DST.raw
mv $TMP_DST.raw $TMP_DST
tar zxf $TMP_DST

# Upload base image and snapshot
$RBD import --export-format 2 - $RBD_SOURCE < disk.*.rbd2

# Apply increments
for f in \$(ls disk.*.*.rbdiff | sort -k3 -t.); do
$RBD import-diff - $RBD_SOURCE < \$f
done

# Delete all snapshots
$RBD snap ls $RBD_SOURCE --format json | jq -r '.[] | select(.protected == "true").name' | xargs -rI{} $RBD snap unprotect $RBD_SOURCE@{}
$RBD snap ls $RBD_SOURCE --format json | jq -r '.[].name' | xargs -rI{} $RBD snap rm $RBD_SOURCE@{}

cd -
$RM -rf $TMP_DST $TMP_DST.d
else
FORMAT=\$($QEMU_IMG info $TMP_DST | grep "^file format:" | awk '{print \$3}' || :)

if [ "\$FORMAT" != "raw" ] && [ "\$FORMAT" != "luks" ]; then
$QEMU_IMG convert -O raw $TMP_DST $TMP_DST.raw
mv $TMP_DST.raw $TMP_DST
fi

$RBD import $FORMAT_OPT $TMP_DST $RBD_SOURCE

# remove original
$RM -f $TMP_DST
fi

$RBD import $FORMAT_OPT $TMP_DST $RBD_SOURCE

# remove original
$RM -f $TMP_DST
EOF
)

ssh_exec_and_log "$DST_HOST" "$REGISTER_CMD" \
"Error registering $RBD_SOURCE in $DST_HOST"
ssh_exec_and_log "$DST_HOST" "$REGISTER_CMD" \
"Error registering $RBD_SOURCE in $DST_HOST"

echo "$RBD_SOURCE raw"
@@ -431,11 +431,17 @@ lxd://*)
file_type="application/octet-stream"
command="$VAR_LOCATION/remotes/datastore/lxd_downloader.sh \"$FROM\""
;;
restic://*)
eval `$VAR_LOCATION/remotes/datastore/restic_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
restic://*|restic+rbd://*)
defs=`$VAR_LOCATION/remotes/datastore/restic_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
ret=$?
[ $ret -ne 0 ] && exit $ret
eval "$defs"
;;
rsync://*)
eval `$VAR_LOCATION/remotes/datastore/rsync_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
rsync://*|rsync+rbd://*)
defs=`$VAR_LOCATION/remotes/datastore/rsync_downloader.rb "$FROM" | grep -e '^command=' -e '^clean_command='`
ret=$?
[ $ret -ne 0 ] && exit $ret
eval "$defs"
;;
*)
if [ ! -r $FROM ]; then
@@ -64,7 +64,7 @@ require_relative 'restic'

TransferManager::Datastore.load_env

ds_xml = STDIN.read
xml = STDIN.read

dir = ARGV[0].split(':')
_disks = ARGV[1]
@@ -89,12 +89,13 @@ repo_id = if bj_id != '-'
end

begin
ds = TransferManager::Datastore.from_xml(:ds_xml => ds_xml)
ds_xml = REXML::Document.new(xml).root.elements['DATASTORE']
ds = TransferManager::Datastore.from_xml(:ds_xml => ds_xml.to_s)

rds = Restic.new ds_xml, :create_repo => true,
:repo_type => :sftp,
:host_type => :hypervisor,
:repo_id => repo_id
rds = Restic.new ds_xml.to_s, :create_repo => true,
:repo_type => :sftp,
:host_type => :hypervisor,
:repo_id => repo_id
rds.resticenv_rb
rescue StandardError => e
STDERR.puts e.full_message
@@ -216,5 +217,14 @@ end
id = parts[0]
short_id = id[0..7] # first 8 chars only

STDOUT.puts "#{short_id} #{parts[1].to_i / (1024 * 1024)}"
vm = REXML::Document.new(xml).root.elements['VM']
backup_format =
if vm.elements['TEMPLATE/TM_MAD_SYSTEM'].text == 'ceph' &&
vm.elements['BACKUPS/BACKUP_CONFIG/MODE']&.text == 'INCREMENT'
'rbd'
else
'raw'
end

STDOUT.puts "#{short_id} #{parts[1].to_i / (1024 * 1024)} #{backup_format}"
exit(0)
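As a side note on the compatibility change: on success the backup drivers now print a third field with the backup format after the backup id and its size in MB, as the hunk above shows. A hypothetical driver reply for an incremental Ceph backup (values illustrative):

    4f9c2a1b 2048 rbd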
@@ -93,6 +93,9 @@ begin
image.chain_up_to(increment_id)
end

xml = REXML::Document.new(action).root
format = xml.elements['IMAGE/FORMAT'].text

rds = Restic.new action, :prefix => 'DATASTORE/',
:repo_type => :local,
:host_type => :frontend
@@ -103,7 +106,7 @@ begin
ds_id = rds['DATASTORE/ID']

snap = image.selected || image.last
burl = "restic://#{ds_id}/#{image.bj_id}/#{chain}"
burl = "restic#{format == 'rbd' ? '+rbd' : ''}://#{ds_id}/#{image.bj_id}/#{chain}"

# --------------------------------------------------------------------------
# Get a list of disk paths stored in the backup
@@ -125,12 +125,14 @@ one_client = OpenNebula::Client.new token
# ------------------------------------------------------------------------------
# Create backup object templates for VM and associated disk images
# ------------------------------------------------------------------------------
restorer = TransferManager::BackupRestore.new :vm_xml64 => vm_xml,
:backup_id => snap,
:bimage => image,
:ds_id => ds_id,
:txml => rds,
:proto => 'restic'
restorer = TransferManager::BackupRestore.new(
:vm_xml64 => vm_xml,
:backup_id => snap,
:bimage => image,
:ds_id => ds_id,
:txml => rds,
:proto => image.proto('restic')
)

br_disks = restorer.disk_images disks

@@ -75,7 +75,9 @@ begin
rds = Restic.new action, :prefix =>'DATASTORE/'
rds.resticenv_rb

file = rds['IMAGE/PATH'].delete_prefix('restic://')
file = rds['IMAGE/PATH']
file.slice! %r{restic(\+[^:]+)?://}

parts = file.split('/')
diskid = parts[-1].match(/disk\.(\d+)/)
base_path = "/#{parts[3..-2].join('/')}/"
@@ -64,11 +64,13 @@ SSH_OPTS = '-q -o ControlMaster=no -o ControlPath=none -o ForwardAgent=yes'

# restic://<datastore_id>/<bj_id>/<id>:<snapshot_id>,.../<file_name>
restic_url = ARGV[0]
tokens = restic_url.delete_prefix('restic://').split('/')

proto, url = restic_url.split(%r{://}, 2)
tokens = url.split('/', 4)
ds_id = tokens[0].to_i
bj_id = tokens[1]
snaps = tokens[2].split(',').map {|s| s.split(':')[1] }
disk_path = tokens[3..-1].join('/')
disk_path = "/#{tokens[3]}"
disk_index = Pathname.new(disk_path).basename.to_s.split('.')[1]
vm_id = disk_path.match('/(\d+)/backup/[^/]+$')[1].to_i

@@ -109,52 +111,60 @@ end
# Prepare image.

begin
tmp_dir = "#{rds.tmp_dir}/#{SecureRandom.uuid}"

tmp_dir = "#{rds.tmp_dir}/#{SecureRandom.uuid}"
paths = rds.pull_chain(snaps, disk_index, rds.sftp, tmp_dir)
disk_paths = paths[:disks][:by_index][disk_index]
disk_paths = paths[:disks][:by_index][disk_index].map {|d| Pathname.new(d) }
tmp_path = "#{tmp_dir}/#{disk_paths.last.basename}"

tmp_path = "#{tmp_dir}/#{Pathname.new(disk_paths.last).basename}"
if proto == 'restic+rbd'
# FULL/INCREMENTAL BACKUP (RBD)

# FULL BACKUP

if disk_paths.size == 1
# Return shell code snippets according to the downloader's interface.
STDOUT.puts <<~EOS
command="ssh #{SSH_OPTS} '#{rds.user}@#{rds.sftp}' cat '#{tmp_path}'"
clean_command="ssh #{SSH_OPTS} '#{rds.user}@#{rds.sftp}' rm -rf '#{tmp_dir}/'"
tmp_path = "#{tmp_dir}/disk.#{disk_index}.#{snaps.last[0]}.tar.gz"
script = <<~EOS
set -e -o pipefail; shopt -qs failglob
mkdir -p '#{tmp_dir}/'
tar zcvf '#{tmp_path}' -C #{tmp_dir} #{disk_paths.map {|d| d.basename }.join(' ')}
rm #{disk_paths.map {|d| "#{tmp_dir}/#{d.basename}" }.join(' ')}
EOS
exit(0)

rc = TransferManager::Action.ssh('prepare_image',
:host => "#{rds.user}@#{rds.sftp}",
:forward => true,
:cmds => script,
:nostdout => false,
:nostderr => false)

raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0
elsif disk_paths.size == 1
# FULL BACKUP (QCOW2)

# No additional preparation needed
true
else
# INCREMENTAL BACKUP (QCOW2)

script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
#{rds.resticenv_sh}
#{TransferManager::BackupImage.reconstruct_chain(disk_paths, :workdir => tmp_dir)}
#{TransferManager::BackupImage.merge_chain(disk_paths, :workdir => tmp_dir)}
EOS

rc = TransferManager::Action.ssh('prepare_image',
:host => "#{rds.user}@#{rds.sftp}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => true,
:nostderr => false)

raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0
end

# INCREMENTAL BACKUP

script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
#{rds.resticenv_sh}
EOS

script << TransferManager::BackupImage.reconstruct_chain(disk_paths,
:workdir => tmp_dir)

script << TransferManager::BackupImage.merge_chain(disk_paths,
:workdir => tmp_dir)

rc = TransferManager::Action.ssh 'prepare_image',
:host => "#{rds.user}@#{rds.sftp}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => true,
:nostderr => false

raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0

# Return shell code snippets according to the downloader's interface.
STDOUT.puts <<~EOS
command="ssh #{SSH_OPTS} '#{rds.user}@#{rds.sftp}' cat '#{tmp_path}'"
clean_command="ssh #{SSH_OPTS} '#{rds.user}@#{rds.sftp}' rm -rf '#{tmp_dir}/'"
EOS
exit(0)
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
@@ -62,7 +62,7 @@ require_relative '../../tm/lib/tm_action'

TransferManager::Datastore.load_env

ds_xml = STDIN.read
xml = STDIN.read

dir = ARGV[0].split(':')
_disks = ARGV[1].split(':')
@@ -81,20 +81,20 @@ vm_dir = if dsrdir
end

begin
ds = REXML::Document.new(ds_xml).root
ds_xml = REXML::Document.new(xml).root.elements['DATASTORE']

rsync_user = ds.elements['TEMPLATE/RSYNC_USER'].text
rsync_host = ds.elements['TEMPLATE/RSYNC_HOST'].text
rsync_user = ds_xml.elements['TEMPLATE/RSYNC_USER'].text
rsync_host = ds_xml.elements['TEMPLATE/RSYNC_HOST'].text

base = ds.elements['BASE_PATH'].text
base = ds_xml.elements['BASE_PATH'].text

if ds.elements['TEMPLATE/RSYNC_ARGS'].nil?
if ds_xml.elements['TEMPLATE/RSYNC_ARGS'].nil?
args = '-aS'
else
args = ds.elements['TEMPLATE/RSYNC_ARGS'].text
args = ds_xml.elements['TEMPLATE/RSYNC_ARGS'].text
end

ds = TransferManager::Datastore.from_xml(:ds_xml => ds_xml)
ds = TransferManager::Datastore.from_xml(:ds_xml => ds_xml.to_s)
rescue StandardError => e
STDERR.puts e.message
exit(-1)
@@ -204,5 +204,14 @@ if rc.code != 0 || rc.stdout.empty?
exit(-1)
end

STDOUT.puts "#{backup_id} #{rc.stdout.lines.last.split[0]}"
vm = REXML::Document.new(xml).root.elements['VM']
backup_format =
if vm.elements['TEMPLATE/TM_MAD_SYSTEM'].text == 'ceph' &&
vm.elements['BACKUPS/BACKUP_CONFIG/MODE']&.text == 'INCREMENT'
'rbd'
else
'raw'
end

STDOUT.puts "#{backup_id} #{rc.stdout.lines.last.split[0]} #{backup_format}"
exit(0)
@@ -98,9 +98,10 @@ begin
bpath = xml.elements['DATASTORE/BASE_PATH'].text
ruser = xml.elements['DATASTORE/TEMPLATE/RSYNC_USER']&.text || 'oneadmin'
rhost = xml.elements['DATASTORE/TEMPLATE/RSYNC_HOST'].text
format = xml.elements['IMAGE/FORMAT'].text

snap = image.selected || image.last
burl = "rsync://#{ds_id}/#{image.bj_id}/#{chain}"
burl = "rsync#{format == 'rbd' ? '+rbd' : ''}://#{ds_id}/#{image.bj_id}/#{chain}"

# --------------------------------------------------------------------------
# Get a list of disk paths stored in the backup
@@ -165,12 +165,14 @@ one_client = OpenNebula::Client.new token
# ------------------------------------------------------------------------------
xml.define_singleton_method('[]') {|xpath| elements[xpath].text }

restorer = TransferManager::BackupRestore.new :vm_xml64 => vm_xml,
:backup_id => snap,
:bimage => image,
:ds_id => ds_id,
:txml => xml,
:proto => 'rsync'
restorer = TransferManager::BackupRestore.new(
:vm_xml64 => vm_xml,
:backup_id => snap,
:bimage => image,
:ds_id => ds_id,
:txml => xml,
:proto => image.proto('rsync')
)

br_disks = restorer.disk_images disk_paths

@@ -74,7 +74,7 @@ begin
rsync_host = rds['/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RSYNC_HOST'].text
rsync_user = rds['/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RSYNC_USER'].text

img.slice! 'rsync://'
img.slice! %r{rsync(\+[^:]+)?://}

parts = img.split('/')
diskid = parts[-1].match(/disk\.([0-9]+)/)
@@ -64,11 +64,13 @@ SSH_OPTS = '-q -o ControlMaster=no -o ControlPath=none -o ForwardAgent=yes'

# rsync://100/3/0:8a3454,1:f6e63e//var/lib/one/datastores/100/6/8a3454/disk.0.0
rsync_url = ARGV[0]
tokens = rsync_url.delete_prefix('rsync://').split('/')

proto, url = rsync_url.split(%r{://}, 2)
tokens = url.split('/', 4)
ds_id = tokens[0].to_i
_bj_id = tokens[1]
increments = tokens[2].split(',').map {|s| s.split(':') }
disk_path = "/#{tokens[3..-1].join('/')}"
disk_path = "/#{tokens[3]}"
disk_index = Pathname.new(disk_path).basename.to_s.split('.')[1]
vm_id = disk_path.match("/#{ds_id}/(\\d+)/[^/]+/[^/]+$")[1].to_i

@@ -93,54 +95,81 @@ end
# Prepare image.

begin
disk_paths = increments.map do |index, snap|
raw = %(#{base_path}/#{vm_id}/#{snap}/disk.#{disk_index}.#{index})
cleaned = Pathname.new(raw).cleanpath.to_s
cleaned
end
tmp_dir = "#{rsync_tmp_dir}/#{SecureRandom.uuid}"
if proto == 'rsync+rbd'
# FULL/INCREMENTAL BACKUP (RBD)

# FULL BACKUP
disk_paths = increments.map do |index, snap|
raw = if index == '0'
"#{base_path}/#{vm_id}/#{snap}/disk.#{disk_index}.rbd2"
else
"#{base_path}/#{vm_id}/#{snap}/disk.#{disk_index}.#{index}.rbdiff"
end
Pathname.new(raw).cleanpath
end

tmp_path = "#{tmp_dir}/disk.#{disk_index}.#{increments.last[0]}.tar.gz"

script = <<~EOS
set -e -o pipefail; shopt -qs failglob
mkdir -p '#{tmp_dir}/'
tar zcvf '#{tmp_path}'#{disk_paths.map {|d| " -C #{d.dirname} #{d.basename}" }.join('')}
EOS

rc = TransferManager::Action.ssh('prepare_image',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script,
:nostdout => false,
:nostderr => false)

raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0

STDOUT.puts <<~EOS
command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' cat '#{tmp_path}'"
clean_command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' rm -rf '#{tmp_dir}/'"
EOS
elsif increments.size == 1
# FULL BACKUP (QCOW2)

if disk_paths.size == 1
# Return shell code snippets according to the downloader's interface.
STDOUT.puts <<~EOS
command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' cat '#{disk_path}'"
clean_command=""
EOS
exit(0)
else
# INCREMENTAL BACKUP (QCOW2)

disk_paths = increments.map do |index, snap|
raw = %(#{base_path}/#{vm_id}/#{snap}/disk.#{disk_index}.#{index})
cleaned = Pathname.new(raw).cleanpath.to_s
cleaned
end

tmp_path = "#{tmp_dir}/#{Pathname.new(disk_paths.last).basename}"

script = <<~EOS
set -e -o pipefail; shopt -qs failglob

#{TransferManager::BackupImage.reconstruct_chain(disk_paths)}
mkdir -p '#{tmp_dir}/'
#{TransferManager::BackupImage.merge_chain(disk_paths,
:destdir => tmp_dir)}
EOS

rc = TransferManager::Action.ssh('prepare_image',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script,
:nostdout => false,
:nostderr => false)

raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0

STDOUT.puts <<~EOS
command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' cat '#{tmp_path}'"
clean_command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' rm -rf '#{tmp_dir}/'"
EOS
end

# INCREMENTAL BACKUP

tmp_dir = "#{rsync_tmp_dir}/#{SecureRandom.uuid}"
tmp_path = "#{tmp_dir}/#{Pathname.new(disk_paths.last).basename}"

script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS

script << TransferManager::BackupImage.reconstruct_chain(disk_paths)

script << "mkdir -p '#{tmp_dir}/'"

script << TransferManager::BackupImage.merge_chain(disk_paths,
:destdir => tmp_dir)

rc = TransferManager::Action.ssh 'prepare_image',
:host => "#{rsync_user}@#{rsync_host}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false

raise StandardError, "Unable to prepare image: #{rc.stderr}" if rc.code != 0

# Return shell code snippets according to the downloader's interface.
STDOUT.puts <<~EOS
command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' cat '#{tmp_path}'"
clean_command="ssh #{SSH_OPTS} '#{rsync_user}@#{rsync_host}' rm -rf '#{tmp_dir}/'"
EOS
exit(0)
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
@@ -2707,6 +2707,12 @@ void LifeCycleManager::trigger_disk_restore_success(int vid)
vm->delete_snapshots(vm_quotas_snp);
vm->delete_non_persistent_disk_snapshots(vm_quotas_snp, ds_quotas_snp);

if ( vm->backups().configured() )
{
vm->backups().last_increment_id(-1);
vm->backups().incremental_backup_id(-1);
}

vm->set_state(VirtualMachine::POWEROFF);
vm->log("LCM", Log::INFO, "VM restore operation completed.");
}
@@ -2822,7 +2828,7 @@ static int create_backup_image(VirtualMachine * vm, string& msg)
itmp->add("NAME", oss.str());
itmp->add("SOURCE", backups.last_backup_id());
itmp->add("SIZE", backups.last_backup_size());
itmp->add("FORMAT", "raw");
itmp->add("FORMAT", backups.last_backup_format());
itmp->add("VM_ID", vm->get_oid());
itmp->add("TYPE", Image::type_to_str(Image::BACKUP));

@@ -55,8 +55,6 @@ require 'rexml/document'

require_relative '../lib/tm_action'
require_relative '../lib/ceph'
require_relative '../lib/kvm'
require_relative '../lib/datastore'

#-------------------------------------------------------------------------------
# BACKUP tm_mad host:remote_dir DISK_ID:...:DISK_ID deploy_id bjid vmid dsid
@@ -75,74 +73,20 @@ _dsid = ARGV[5]
rhost = dir[0]
rdir = dir[1]

ds = TransferManager::Datastore.from_vm_backup_ds(:vm_xml => vm_xml)

action = TransferManager::Action.new(:action_name => 'prebackup',
:vm_id => vmid)

base_path = ENV['BACKUP_BASE_PATH']

bck_dir = if base_path
"#{base_path}/#{vmid}/backup"
else
"#{rdir}/backup"
end
backup_dir =
if base_path
"#{base_path}/#{vmid}/backup"
else
"#{rdir}/backup"
end

snap_cmd = ''
expo_cmd = ''
conv_cmd = ''
clup_cmd = ''

ceph_disks = TransferManager::Ceph::Disk.from_vm(action.vm.template_xml)
ceph_disks.compact.each do |d|
did = d.id
next unless disks.include? did.to_s

rbd_src = d.rbd_image
cmd = d.rbd_cmd

# Full backup
draw = "#{bck_dir}/disk.#{did}.raw"
ddst = "#{bck_dir}/disk.#{did}.0"

ceph_cmd = "#{cmd} export #{rbd_src} #{draw}\n"
expo_cmd << ds.cmd_confinement(ceph_cmd, rdir)

qemu_cmd = "qemu-img convert -m 4 -O qcow2 #{draw} #{ddst}\n"
conv_cmd << ds.cmd_confinement(qemu_cmd, rdir)

clup_cmd << "rm -f #{draw}\n"
rescue StandardError => e
STDERR.puts "Missing configuration attributes in DISK: #{e.message}"
exit(1)
end

script = <<~EOS
set -ex -o pipefail

# ----------------------
# Prepare backup folder
# ----------------------
[ -d #{bck_dir} ] && rm -rf #{bck_dir}

mkdir -p #{bck_dir}

echo "#{Base64.encode64(vm_xml)}" > #{bck_dir}/vm.xml

# --------------------------------
# Create Ceph snapshots for disks
# --------------------------------
#{snap_cmd}

# --------------------------
# export, convert & cleanup
# --------------------------
#{expo_cmd}

#{conv_cmd}

#{clup_cmd}
EOS
action = TransferManager::Action.new(:action_name => 'prebackup',
:vm_id => vmid)
ds = TransferManager::Datastore.from_vm_backup_ds(:vm_xml => vm_xml.to_s)
ceph_vm = TransferManager::Ceph::VM.new(action.vm.to_xml)
script = ceph_vm.backup_disks_sh(disks, backup_dir, ds, false)

rc = action.ssh(:host => rhost,
:cmds => script,
@@ -55,21 +55,6 @@ require 'rexml/document'

require_relative '../lib/tm_action'
require_relative '../lib/ceph'
require_relative '../lib/kvm'
require_relative '../lib/datastore'

# TODO: fsfreeze for each hypervisor based on VM_MAD
include TransferManager::KVM

#-------------------------------------------------------------------------------
# Helper functions
#-------------------------------------------------------------------------------
def rbd_append(str, disk, name, opt)
opt_val = disk.elements[name].text
"#{str} #{opt} #{opt_val}" unless opt_val.empty?
rescue StandardError
str
end

#-------------------------------------------------------------------------------
# BACKUP tm_mad host:remote_dir DISK_ID:...:DISK_ID deploy_id bjid vmid dsid
@@ -88,85 +73,20 @@ _dsid = ARGV[5]
rhost = dir[0]
rdir = dir[1]

xml_doc = REXML::Document.new(vm_xml)
vm = xml_doc.root

ds = TransferManager::Datastore.from_vm_backup_ds(:vm_xml => vm_xml)

action = TransferManager::Action.new(:action_name => 'prebackup_live',
:vm_id => vmid)

base_path = ENV['BACKUP_BASE_PATH']

bck_dir = if base_path
"#{base_path}/#{vmid}/backup"
else
"#{rdir}/backup"
end
backup_dir =
if base_path
"#{base_path}/#{vmid}/backup"
else
"#{rdir}/backup"
end

snap_cmd = ''
expo_cmd = ''
conv_cmd = ''
clup_cmd = ''

ceph_disks = TransferManager::Ceph::Disk.from_vm(action.vm.template_xml)
ceph_disks.compact.each do |d|
did = d.id
next unless disks.include? did.to_s

rbd_src = d.rbd_image
cmd = d.rbd_cmd

draw = "#{bck_dir}/disk.#{did}.raw"
ddst = "#{bck_dir}/disk.#{did}.0"

snap_cmd << "#{cmd} snap create #{rbd_src}@backup\n"

ceph_cmd = "#{cmd} export #{rbd_src}@backup #{draw}\n"
expo_cmd << ds.cmd_confinement(ceph_cmd, rdir)

qemu_cmd = "qemu-img convert -m 4 -O qcow2 #{draw} #{ddst}\n"
conv_cmd << ds.cmd_confinement(qemu_cmd, rdir)

clup_cmd << "#{cmd} snap rm #{rbd_src}@backup\n"
clup_cmd << "rm -f #{draw}\n"
rescue StandardError => e
STDERR.puts "Missing configuration attributes in DISK: #{e.message}"
exit(1)
end

freeze, thaw = fsfreeze(vm, deploy_id)

script = <<~EOS
set -ex -o pipefail

# ----------------------
# Prepare backup folder
# ----------------------
[ -d #{bck_dir} ] && rm -rf #{bck_dir}

mkdir -p #{bck_dir}

echo "#{Base64.encode64(vm_xml)}" > #{bck_dir}/vm.xml

# --------------------------------
# Create Ceph snapshots for disks
# --------------------------------
#{freeze}

#{snap_cmd}

#{thaw}

# --------------------------
# export, convert & cleanup
# --------------------------
#{expo_cmd}

#{conv_cmd}

#{clup_cmd}
EOS
action = TransferManager::Action.new(:action_name => 'prebackup_live',
:vm_id => vmid)
ds = TransferManager::Datastore.from_vm_backup_ds(:vm_xml => vm_xml.to_s)
ceph_vm = TransferManager::Ceph::VM.new(action.vm.to_xml)
script = ceph_vm.backup_disks_sh(disks, backup_dir, ds, true, deploy_id)

rc = action.ssh(:host => rhost,
:cmds => script,
@@ -95,20 +95,21 @@ begin

raise 'cannot list backup contents' unless rc.code == 0

# e.g.: {"0"=>"rsync://100//0:a47997,1:6ca565/var/lib/one/datastores/100/3/a47997/disk.0.rbd2"}
disk_urls = JSON.parse(rc.stdout)
disk_urls = disk_urls.select {|id, _url| id.to_i == disk_id } if disk_id != -1

# --------------------------------------------------------------------------
# Restore disk_urls in Host VM folder
# --------------------------------------------------------------------------
ceph_disks = TransferManager::Ceph::Disk.from_vm(action.vm.template_xml)
ceph_disks = TransferManager::Ceph::Disk.from_vm(action.vm.to_xml)
success_disks = []

info = {}

disk_urls.each do |id, url|
ceph_disk = ceph_disks[id.to_i]
randsuffix = SecureRandom.hex(5)
randsuffix = SecureRandom.hex(4)

ceph_one_ds = OpenNebula::Datastore.new_with_id(
action.vm["/VM/TEMPLATE/DISK[DISK_ID = #{id}]/DATASTORE_ID"].to_i, action.one
@@ -117,16 +118,24 @@ begin

info[ceph_disk] = {
:br => ceph_ds.pick_bridge,
:bak => "#{ceph_disk.rbd_image}.backup.#{randsuffix}",
:new => "#{ceph_disk.rbd_image}.new.#{randsuffix}",
:old => "#{ceph_disk.rbd_image}.old.#{randsuffix}"
}

upload_ceph = <<~EOS
tmpimg="$(mktemp -t disk#{id}.XXXX)"
tmpdir="$(mktemp -dt disk#{id}.XXXX)"
tmpimg=$tmpdir/image
trap "rm -rf $tmpdir" EXIT
#{__dir__}/../../datastore/downloader.sh --nodecomp #{url} $tmpimg && \
qemu-img convert -m 4 -O raw $tmpimg $tmpimg.raw && \
ssh #{info[ceph_disk][:br]} #{ceph_disk.rbd_cmd} import - #{info[ceph_disk][:bak]} < $tmpimg.raw; \
rm $tmpimg $tmpimg.raw
if file $tmpimg | grep -q gzip; then
cd $tmpdir
tar zxf $tmpimg
#{ceph_disk.restore_sh(info[ceph_disk][:new], info[ceph_disk][:br])}
cd -
else
qemu-img convert -m 4 -O raw $tmpimg $tmpimg.raw && \
ssh #{info[ceph_disk][:br]} #{ceph_disk.rbd_cmd} import - #{info[ceph_disk][:new]} < $tmpimg.raw
fi
EOS

rc = action.ssh(:host => nil,
@@ -144,7 +153,7 @@ begin
if success_disks.length != disk_urls.length
success_disks.each do |ceph_disk|
cleanup = <<~EOS
#{ceph_disk.rbd_cmd} rm #{info[ceph_disk][:bak]}
#{ceph_disk.rbd_cmd} rm #{info[ceph_disk][:new]}
EOS

action.ssh(:host => info[ceph_disk][:br],
@@ -165,7 +174,7 @@ begin
#{ceph_disk.shdefs}

#{ceph_disk.rbd_cmd} mv #{ceph_disk.rbd_image} #{info[ceph_disk][:old]} && \
#{ceph_disk.rbd_cmd} mv #{info[ceph_disk][:bak]} #{ceph_disk.rbd_image} && \
#{ceph_disk.rbd_cmd} mv #{info[ceph_disk][:new]} #{ceph_disk.rbd_image} && \
rbd_rm_image #{info[ceph_disk][:old]}
EOS

@@ -23,7 +23,7 @@ module TransferManager
# This class includes methods manage backup images
class BackupImage

attr_reader :vm_id, :keep_last, :bj_id
attr_reader :vm_id, :keep_last, :bj_id, :format

# Given a sorted list of qcow2 files,
# return a shell recipe that reconstructs the backing chain in-place.
@@ -116,6 +116,18 @@ module TransferManager
@keep_last = @action.elements['/DS_DRIVER_ACTION_DATA/EXTRA_DATA/KEEP_LAST']&.text.to_i

@incr_id = @action.elements['/DS_DRIVER_ACTION_DATA/TEMPLATE/INCREMENT_ID']&.text.to_i

@format = @action.elements["#{prefix}/FORMAT"]&.text
end

# Returns the backup protocol to use (e.g. rsync, restic+rbd) based
# on backup format
def proto(base)
if @format == 'rbd'
"#{base}+rbd"
else
base
end
end

def last
@@ -18,32 +18,98 @@

require 'rexml/document'
require_relative 'datastore'
require_relative 'kvm'

module TransferManager

# Ceph utils
class Ceph

# VM containing Ceph disks
class VM

include TransferManager::KVM

def initialize(vm_xml)
@xml = vm_xml
@disks = Disk.from_vm(@xml)
end

def backup_disks_sh(disks, backup_dir, ds, live, deploy_id = nil)
snap_cmd = ''
expo_cmd = ''
clup_cmd = ''
@disks.compact.each do |d|
did = d.id
next unless disks.include? did.to_s

cmds = d.backup_cmds(backup_dir, ds, live)
snap_cmd << cmds[:snapshot]
expo_cmd << cmds[:export]
clup_cmd << cmds[:cleanup]
end

freeze, thaw =
if live
fsfreeze(@xml, deploy_id)
else
['', '']
end

<<~EOS
set -ex -o pipefail

# ----------------------
# Prepare backup folder
# ----------------------
[ -d #{backup_dir} ] && rm -rf #{backup_dir}

mkdir -p #{backup_dir}

echo "#{Base64.encode64(@xml)}" > #{backup_dir}/vm.xml

# --------------------------------
# Create Ceph snapshots for disks
# --------------------------------
#{freeze}

#{snap_cmd}

#{thaw}

# --------------------------
# export, convert & cleanup
# --------------------------
#{expo_cmd}

#{clup_cmd}
EOS
end

end

# Ceph disks
class Disk

attr_reader :id, :vmid, :source, :clone, :rbd_image, :rbd_cmd

# @param vmid [Integer]
# @param vm_xml [String, REXML::Element]
# @param disk_xml [String, REXML::Document, REXML::Element]
# @return [Disk]
def initialize(vmid, disk_xml)
def initialize(vm_xml, disk_xml)
vm_xml = REXML::Document.new(vm_xml) if vm_xml.is_a?(String)
disk_xml = REXML::Document.new(disk_xml) if disk_xml.is_a?(String)

@vm = vm_xml
@vmid = @vm.elements['TEMPLATE/VMID'].text
@id = disk_xml.elements['DISK_ID'].text.to_i
@vmid = vmid
@type = disk_xml.elements['TYPE'].text
@pool = disk_xml.elements['POOL_NAME'].text

if volatile?
@source = nil
@clone = nil
@rbd_image = "#{@pool}/one-sys-#{vmid}-#{id}"
@rbd_image = "#{@pool}/one-sys-#{@vmid}-#{id}"
else
@source = disk_xml.elements['SOURCE'].text
@clone = disk_xml.elements['CLONE'].text == 'YES'
@@ -59,12 +125,129 @@ module TransferManager
@rbd_cmd += Ceph.xml_opt(disk_xml, 'CEPH_USER', '--id')
@rbd_cmd += Ceph.xml_opt(disk_xml, 'CEPH_KEY', '--keyfile')
@rbd_cmd += Ceph.xml_opt(disk_xml, 'CEPH_CONF', '--conf')

bc = @vm.elements['BACKUPS/BACKUP_CONFIG']
mode = bc.elements['MODE']&.text if bc

@vm_backup_config =
if 'INCREMENT'.casecmp?(mode)
{
:mode => :increment,
:last_increment => bc.elements['LAST_INCREMENT_ID'].text.to_i
}
else
{
:mode => :full
}
end
end

def volatile?
['fs', 'swap'].include?(@type)
end

# @param backup_dir [String]
# @param ds [TransferManager::Datastore]
# @param live [Boolean]
# @return [Disk]
def backup_cmds(backup_dir, ds, live)
snap_cmd = ''
expo_cmd = ''
clup_cmd = ''

if @vm_backup_config[:mode] == :full
# Full backup
draw = "#{backup_dir}/disk.#{@id}.raw"
ddst = "#{backup_dir}/disk.#{@id}.0"

if live
snapshot = "#{@rbd_image}@one_backup"

snap_cmd << "#{@rbd_cmd} snap create #{snapshot}\n"
expo_cmd << ds.cmd_confinement(
"#{@rbd_cmd} export #{snapshot} #{draw}\n",
backup_dir
)
clup_cmd << "#{@rbd_cmd} snap rm #{snapshot}\n"
else
expo_cmd << ds.cmd_confinement(
"#{@rbd_cmd} export #{@rbd_image} #{draw}\n",
backup_dir
)
end

expo_cmd << ds.cmd_confinement(
"qemu-img convert -m 4 -O qcow2 #{draw} #{ddst}\n",
backup_dir
)

clup_cmd << "rm -f #{draw}\n"
elsif @vm_backup_config[:last_increment] == -1
# First incremental backup (similar to full but snapshot must be preserved)
incid = 0

dexp = "#{backup_dir}/disk.#{@id}.rbd2"
sprefix = 'one_backup_'
snapshot = "#{@rbd_image}@#{sprefix}#{incid}"

snap_cmd << <<~EOF
#{@rbd_cmd} snap ls #{@rbd_image} --format json | \
jq -r '.[] | select(.protected == "true" and (.name | startswith("#{sprefix}"))).name' | \
xargs -rI{} #{@rbd_cmd} snap unprotect #{@rbd_image}@{}
#{@rbd_cmd} snap ls #{@rbd_image} --format json | \
jq -r '.[] | select(.name | startswith("#{sprefix}")).name' | \
xargs -rI{} #{@rbd_cmd} snap rm #{@rbd_image}@{}
#{@rbd_cmd} snap create #{snapshot}
#{@rbd_cmd} snap protect #{snapshot}
EOF

expo_cmd << ds.cmd_confinement(
"#{@rbd_cmd} export --export-format 2 #{snapshot} #{dexp}\n",
backup_dir
)
else
# Incremental backup
incid = @vm_backup_config[:last_increment] + 1

dinc = "#{backup_dir}/disk.#{@id}.#{incid}.rbdiff"
snapshot = "#{@rbd_image}@one_backup_#{incid}"

last_snap = "one_backup_#{@vm_backup_config[:last_increment]}"

snap_cmd << "#{@rbd_cmd} snap create #{snapshot}\n"
snap_cmd << "#{@rbd_cmd} snap protect #{snapshot}\n"

expo_cmd << ds.cmd_confinement(
"#{@rbd_cmd} export-diff --from-snap #{last_snap} #{snapshot} #{dinc}\n",
backup_dir
)
end

{
:snapshot => snap_cmd,
:export => expo_cmd,
:cleanup => clup_cmd
}
end

def restore_sh(target, bridge = nil)
ssh = bridge ? "ssh #{bridge}" : ''
<<~EOF
# Upload base image and snapshot
#{ssh} #{@rbd_cmd} import --export-format 2 - #{target} < disk.*.rbd2

# Apply increments
for f in $(ls disk.*.*.rbdiff | sort -k3 -t.); do
#{ssh} #{@rbd_cmd} import-diff - #{target} < $f
done

# Protect all snapshots
#{ssh} #{@rbd_cmd} snap ls #{target} --format json | \
jq -r '.[] | select(.protected == "false").name' | \
xargs -I{} #{@rbd_cmd} snap protect #{target}@{}
EOF
end

# @return [String] Shell definitions for functionality related to this disk
def shdefs
<<~SCRIPT
@@ -101,11 +284,10 @@ module TransferManager
def self.from_vm(vm_xml)
vm_xml = REXML::Document.new(vm_xml) if vm_xml.is_a?(String)
vm = vm_xml.root
vmid = vm.elements['VMID'].text

indexed_disks = []
vm.elements.each('DISK[TYPE="RBD"]') do |d|
disk = new(vmid, d)
vm.elements.each('TEMPLATE/DISK[TYPE="RBD"]') do |d|
disk = new(vm, d)
indexed_disks[disk.id] = disk
end

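To make the generated strings above concrete: for a disk whose last recorded increment is 1, backup_cmds boils down to a protected snapshot plus an export-diff against the previous snapshot. A rough sketch with assumed names (pool, image, increment ids and BACKUP_DIR are illustrative, not taken from the driver):

    # Protected snapshot for increment 2, then a delta against increment 1
    rbd snap create "$POOL/one-123-0@one_backup_2"
    rbd snap protect "$POOL/one-123-0@one_backup_2"
    rbd export-diff --from-snap one_backup_1 \
        "$POOL/one-123-0@one_backup_2" "$BACKUP_DIR/disk.0.2.rbdiff"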
@@ -122,10 +122,9 @@ module TransferManager
unless success
err = opt[:err_msg] || "Command failed:\n#{script}"

@logger.error "#{err.chomp}\nError: #{rc.stdout}"
@logger.error "ERROR: #{err.chomp})\n"
@logger.error " [STDOUT] #{rc.stdout.gsub("\n", '\n')}\n" unless opt[:nostdout]
@logger.error " [STDERR] #{rc.stderr.gsub("\n", '\n')}\n" unless opt[:nostderr]
@logger.error err.chomp
@logger.error " [STDOUT] \"#{rc.stdout.gsub("\n", '\n')}\"\n" unless opt[:nostdout]
@logger.error " [STDERR] \"#{rc.stderr.gsub("\n", '\n')}\"\n" unless opt[:nostderr]
end

rc
@@ -1599,6 +1599,11 @@ bool VirtualMachineDisks::backup_increment(bool do_volatile)

one_util::toupper(type);

if (type == "RBD")
{
continue;
}

if ((type == "SWAP") || ((type == "FS") && !do_volatile))
{
continue;
@@ -886,6 +886,7 @@ void VirtualMachineManager::_backup(unique_ptr<vm_msg_t> msg)
{
string backup_id;
string backup_size;
string backup_format;

istringstream is(msg->payload());

@@ -893,12 +894,19 @@ void VirtualMachineManager::_backup(unique_ptr<vm_msg_t> msg)

is >> backup_size;

if (!(is >> backup_format)) //Default to raw if not provided by driver
{
backup_format = "raw";
}

if ( auto vm = vmpool->get(id) )
{
vm->backups().last_backup_id(backup_id);

vm->backups().last_backup_size(backup_size);

vm->backups().last_backup_format(backup_format);

vmpool->update(vm.get());

vm->log("VMM", Log::INFO, "VM backup successfully created.");
@@ -1373,7 +1373,12 @@ class ExecDriver < VirtualMachineDriver
:driver => :ds,
:action => :backup,
:parameters => ds_command,
:stdin => xml_data.elements['DATASTORE'].to_s,
:stdin => <<~EOF,
<DS_DRIVER_ACTION_DATA>
#{xml_data.elements['DATASTORE']}
#{vm_xml}
</DS_DRIVER_ACTION_DATA>
EOF
:fail_actions => cleanup_steps
}
] + cleanup_steps