1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-01-03 01:17:41 +03:00

F #5516: Incremental backups for qcow2 disk images

- Adds new configuration attribute MODE: FULL or INCREMENTAL for
  BACKUP_CONFIG. Each FULL backup uses a different backup image.

- INCREMENTAL backup information is stored together with the backup image.
  Example:

      <BACKUP_INCREMENTS>
        <INCREMENT>
          <DATE><![CDATA[1667770552]]></DATE>
          <ID><![CDATA[0]]></ID>
          <PARENT_ID><![CDATA[-1]]></PARENT_ID>
          <SIZE><![CDATA[172]]></SIZE>
          <SOURCE><![CDATA[bb828060]]></SOURCE>
          <TYPE><![CDATA[FULL]]></TYPE>
        </INCREMENT>
        <INCREMENT>
          <DATE><![CDATA[1667770604]]></DATE>
          <ID><![CDATA[1]]></ID>
          <PARENT_ID><![CDATA[0]]></PARENT_ID>
          <SIZE><![CDATA[1]]></SIZE>
          <SOURCE><![CDATA[ca0de5f6]]></SOURCE>
          <TYPE><![CDATA[INCREMENT]]></TYPE>
        </INCREMENT>
        <INCREMENT>
          <DATE><![CDATA[1667770700]]></DATE>
          <ID><![CDATA[2]]></ID>
          <PARENT_ID><![CDATA[1]]></PARENT_ID>
          <SIZE><![CDATA[39]]></SIZE>
          <SOURCE><![CDATA[e9897d6a]]></SOURCE>
          <TYPE><![CDATA[INCREMENT]]></TYPE>
        </INCREMENT>
      </BACKUP_INCREMENTS>

    This information only appears on incremental backups

- Internal BACKUP_CONFIG data includes information about the current
  active backup and the last increment id.

- Backup operation includes a new parameter: reset. This "closes" the
  current active incremental chain and creates a new FULL backup.

- Backup drivers store backups with increment index (0 = FULL) e.g.
  disk.0.0.

- Incremental backups are only allowed for VMs using all disks in qcow2
  format.

- Backup configuration cannot be changed while doing a VM backup.

- Downloader strings include backup chains <inc_id>:<backup_ref>,...

- Restic downloader has been updated to support backup chains. Disk
  images are rebased across increments.
This commit is contained in:
Ruben S. Montero 2022-11-06 22:54:17 +01:00
parent f9b077fb55
commit 0a46e21129
No known key found for this signature in database
GPG Key ID: A0CEA6FA880A1D87
30 changed files with 1750 additions and 282 deletions

194
include/BackupIncrements.h Normal file
View File

@ -0,0 +1,194 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2022, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef BACKUPS_INCREMENTS_H_
#define BACKUPS_INCREMENTS_H_
#include <string>
#include "ExtendedAttribute.h"
#include "NebulaUtil.h"
/**
* The Image INCREMENT attribute
*/
class Increment : public ExtendedAttribute
{
public:
    Increment(VectorAttribute *va, int id): ExtendedAttribute(va, id){};

    ~Increment() = default;

    enum Type
    {
        FULL      = 0, /** < Full backup*/
        INCREMENT = 1, /** < Forward increment */
    };

    /**
     *  @return size of this increment (0 if the SIZE attribute is not set)
     */
    long long size() const
    {
        long long increment_sz = 0;

        vector_value("SIZE", increment_sz);

        return increment_sz;
    }

    /**
     *  @return unique ID of this increment within the backup
     */
    int id() const
    {
        return get_id();
    }

    /**
     *  @return type of the increment. Any TYPE string other than
     *  "INCREMENT" (including unknown values) maps to FULL.
     */
    Type type() const
    {
        std::string type_s = vector_value("TYPE");

        one_util::toupper(type_s);

        if ( type_s == "INCREMENT" )
        {
            return INCREMENT;
        }

        return FULL;
    }
};
/**
* Set of INCREMENTS (indexed by ID)
*/
class IncrementSet : public ExtendedAttributeSet
{
public:
IncrementSet(): ExtendedAttributeSet(false){};
~IncrementSet() = default;
void init(Template * tmpl)
{
std::vector<VectorAttribute *> incs;
tmpl->get("INCREMENT", incs);
init_attribute_map("ID", incs);
};
/* ---------------------------------------------------------------------- */
/* Increment interface */
/* ---------------------------------------------------------------------- */
VectorAttribute * new_increment(std::string source, long long sz,
Increment::Type type);
Increment * last_increment()
{
return static_cast<Increment *>(last_attribute());
}
long long total_size();
/* ---------------------------------------------------------------------- */
/* Iterators */
/* ---------------------------------------------------------------------- */
class IncIterator : public AttributeIterator
{
public:
IncIterator():AttributeIterator(){};
IncIterator(const AttributeIterator& dit):AttributeIterator(dit){};
virtual ~IncIterator(){};
Increment * operator*() const
{
return static_cast<Increment *>(map_it->second);
}
};
IncIterator begin()
{
IncIterator it(ExtendedAttributeSet::begin());
return it;
}
IncIterator end()
{
IncIterator it(ExtendedAttributeSet::end());
return it;
}
typedef class IncIterator inc_iterator;
private:
ExtendedAttribute * attribute_factory(VectorAttribute * va, int id) const
{
return new Increment(va, id);
};
};
/**
* This class represents a generic set of references (links) for Image objects
* The data model is as follows
* <BACKUP_INCREMENTS>
* <INCREMENT>
* <ID> Unique ID within this backup increment
* <TYPE> Of the backup FULL | INCREMENT
* <PARENT_ID> ID of the parent increment (backing file)
* <SOURCE> Reference in the backup system
* <SIZE> Size of this increment
* <DATE> When this backup was taken (epoch)
*/
class BackupIncrements
{
public:
    BackupIncrements():_template(false,'=',"BACKUP_INCREMENTS"){};

    ~BackupIncrements() = default;

    /* ---------------------------------------------------------------------- */
    /* XML from/to methods for DB persistency                                 */
    /* ---------------------------------------------------------------------- */

    /**
     *  Rebuilds the increment set from a BACKUP_INCREMENTS XML node
     *  @return 0 on success, -1 otherwise
     */
    int from_xml_node(const xmlNodePtr node);

    /**
     *  Renders the increments in XML form
     *  @param xml string to store the XML representation
     *  @return reference to xml
     */
    std::string& to_xml(std::string& xml) const
    {
        return _template.to_xml(xml);
    }

    /* ---------------------------------------------------------------------- */
    /* Increments interface                                                   */
    /* ---------------------------------------------------------------------- */

    /**
     *  Adds a new increment to the chain
     *  @param source reference of the increment in the backup system
     *  @param size of the increment
     *  @param type FULL (only valid to start a chain) or INCREMENT
     *  @return 0 on success, -1 otherwise
     */
    int add_increment(std::string source, long long size, Increment::Type type);

    /**
     *  @return ID of the last increment in the chain, -1 if the chain is empty
     */
    int last_increment_id();

    /**
     *  @return total size of the backup (sum of all increments)
     */
    long long total_size()
    {
        return increments.total_size();
    }

private:
    /**
     *  Text representation of the increments
     */
    Template _template;

    /**
     *  Increment set built on top of _template
     */
    IncrementSet increments;
};

#endif /*BACKUPS_INCREMENTS_H_*/

View File

@ -34,6 +34,7 @@ class ObjectXML;
* <KEEP_LAST> Just keep the last N backups
* <BACKUP_VOLATILE> Backup volatile disks or not
* <FS_FREEZE> FS freeze operation to perform on the VM
* <MODE> Backup mode
* <LAST_DATASTORE_ID> The datastore ID used to store the active backups(*)
* <LAST_BACKUP_ID> ID of the active backup(*)
* <LAST_BACKUP_SIZE> SIZE of the active backup(*)
@ -55,6 +56,43 @@ public:
~Backups() = default;
// *************************************************************************
// Backup modes
// *************************************************************************
enum Mode
{
FULL = 0, /** < Full backups */
INCREMENT = 1, /** < Forward increments */
};
/**
 *  @return string representation of a backup Mode ("" for unknown values)
 */
static std::string mode_to_str(Mode bm)
{
    if ( bm == FULL )
    {
        return "FULL";
    }

    if ( bm == INCREMENT )
    {
        return "INCREMENT";
    }

    return "";
}
/**
 *  Parses a backup mode string (converted to uppercase in place).
 *  @return the Mode, FULL for any value other than "INCREMENT"
 */
static Mode str_to_mode(std::string& str_mode)
{
    one_util::toupper(str_mode);

    if ( str_mode == "INCREMENT" )
    {
        return INCREMENT;
    }

    return FULL;
}
// *************************************************************************
// Initialization functions
// *************************************************************************
@ -78,20 +116,39 @@ public:
* - BACKUP_VOLATILE
* - KEEP_LAST
* - FS_FREEZE
* - MODE
*
* The following attributes are stored in the configuration and refers
* only to the active backup operation
* - LAST_DATASTORE_ID
* - LAST_BACKUP_ID
* - LAST_BACKUP_SIZE
*
* Incremental backups include a reference to the last increment and full
* backup:
* - LAST_INCREMENT_ID
* - INCREMENTAL_BACKUP_ID
*/
int parse(std::string& error_str, Template *tmpl);
int parse(Template *tmpl, bool can_increment, std::string& error_str);
/**
 * @return true if Backup includes configuration attributes
 */
bool configured()
{
    // An empty BACKUP_CONFIG template means no configuration was defined.
    // Returning config.empty() here would invert the documented contract
    // (and would make callers reset incremental chains for unconfigured VMs).
    return !config.empty();
}
/**
* @return true if the backup needs to include volatile disks
*/
bool do_volatile() const;
/**
* @return the backup mode configured for the VM (FULL or INCREMENT)
*/
Mode mode() const;
/**
* Set of functions to manipulate the LAST_* attributes referring to
* the active backup operation
@ -111,6 +168,16 @@ public:
config.replace("LAST_BACKUP_SIZE", size);
}
void last_increment_id(int id)
{
config.replace("LAST_INCREMENT_ID", id);
}
void incremental_backup_id(int id)
{
config.replace("INCREMENTAL_BACKUP_ID", id);
}
/* ---------------------------------------------------------------------- */
int last_datastore_id() const
@ -140,6 +207,23 @@ public:
return sz;
}
/**
 *  @return ID of the last increment of the active chain, -1 if the
 *  attribute is not set (no incremental chain active)
 */
int last_increment_id() const
{
    // Initialize to the "no chain" sentinel: config.get() may leave the
    // value untouched when LAST_INCREMENT_ID is absent, and returning an
    // uninitialized int is undefined behavior.
    int id = -1;

    config.get("LAST_INCREMENT_ID", id);

    return id;
}
/**
 *  @return ID of the backup Image holding the active incremental chain,
 *  -1 if the attribute is not set (no incremental backup active)
 */
int incremental_backup_id() const
{
    // Initialize to -1 so a missing INCREMENTAL_BACKUP_ID does not result
    // in returning an uninitialized value (undefined behavior).
    int id = -1;

    config.get("INCREMENTAL_BACKUP_ID", id);

    return id;
}
/* ---------------------------------------------------------------------- */
void last_backup_clear()

View File

@ -465,7 +465,7 @@ public:
*
* @return 0 on success, -1 otherwise
*/
int backup(int vid, int bck_ds_id,
int backup(int vid, int bck_ds_id, bool reset,
const RequestAttributes& ra, std::string& error_str);
//--------------------------------------------------------------------------

View File

@ -171,10 +171,15 @@ protected:
/* Method to access attributes */
/* ---------------------------------------------------------------------- */
/**
* @return attribute by id or 0 if not found
* @return attribute by id or nullptr if not found
*/
ExtendedAttribute * get_attribute(int id) const;
/**
* @return last_attribute or nullptr if empty set
*/
ExtendedAttribute * last_attribute() const;
/* ---------------------------------------------------------------------- */
/* Iterators */
/* ---------------------------------------------------------------------- */

View File

@ -21,6 +21,7 @@
#include "ImageTemplate.h"
#include "ObjectCollection.h"
#include "Snapshots.h"
#include "BackupIncrements.h"
class VirtualMachineDisk;
@ -607,6 +608,28 @@ public:
target_snapshot = -1;
};
/* ---------------------------------------------------------------------- */
/* Incremental backups interface */
/* ---------------------------------------------------------------------- */
int add_increment(std::string source, long long size, Increment::Type type)
{
int rc = increments.add_increment(source, size, type);
if ( rc == -1 )
{
return -1;
}
size_mb = increments.total_size();
return 0;
}
int last_increment_id()
{
return increments.last_increment_id();
}
private:
// -------------------------------------------------------------------------
@ -717,6 +740,12 @@ private:
*/
Snapshots snapshots;
/**
* List of backup increments (only relevant for BACKUP images, of type
* incremental)
*/
BackupIncrements increments;
/**
* ID of the snapshot being processed (if any)
*/

View File

@ -634,7 +634,7 @@ public:
VirtualMachineBackup():
RequestManagerVirtualMachine("one.vm.backup",
"Creates a new backup image for the virtual machine",
"A:sii")
"A:siib")
{
vm_action = VMActions::BACKUP_ACTION;
}

View File

@ -1736,9 +1736,9 @@ public:
/**
*
*/
void max_backup_size(Template &ds_quota)
long long backup_size(Template &ds_quota)
{
disks.backup_size(ds_quota, _backups.do_volatile());
return disks.backup_size(ds_quota, _backups.do_volatile());
}
Backups& backups()

View File

@ -506,6 +506,19 @@ public:
*/
void get_image_ids(std::set<int>& ids, int uid);
/**
* Marshall disks in XML format with just essential information
* @param xml string to write the disk XML description
*/
std::string& to_xml_short(std::string& xml);
/**
* Check if a tm_mad is valid for each Virtual Machine Disk and set
* clone_target and ln_target
* @param tm_mad is the tm_mad for system datastore chosen
*/
int check_tm_mad(const std::string& tm_mad, std::string& error);
/* ---------------------------------------------------------------------- */
/* Image Manager Interface */
/* ---------------------------------------------------------------------- */
@ -783,24 +796,20 @@ public:
/* ---------------------------------------------------------------------- */
/* BACKUP interface */
/* ---------------------------------------------------------------------- */
/** Returns upper limit of the disk size needed to do a VM backup
/**
* Returns upper limit of the disk size needed to do a VM backup
* @param ds_quota The Datastore quota
* @param do_volatile consider volatile disks to compute size
*/
void backup_size(Template &ds_quota, bool do_volatile);
long long backup_size(Template &ds_quota, bool do_volatile);
/**
* Marshall disks in XML format with just essential information
* @param xml string to write the disk XML description
* Returns true if all disks support incremental backups. This requires
* QCOW2 format.
*
* @param do_volatile consider volatile disks for incremental backups
*/
std::string& to_xml_short(std::string& xml);
/**
* Check if a tm_mad is valid for each Virtual Machine Disk and set
* clone_target and ln_target
* @param tm_mad is the tm_mad for system datastore chosen
*/
int check_tm_mad(const std::string& tm_mad, std::string& error);
bool backup_increment(bool do_volatile);
protected:

View File

@ -1885,6 +1885,7 @@ TM_FILES="src/tm_mad/tm_common.sh"
TM_LIB_FILES="src/tm_mad/lib/kvm.rb \
src/tm_mad/lib/tm_action.rb \
src/tm_mad/lib/backup_qcow2.rb \
src/tm_mad/lib/backup.rb"
TM_SHARED_FILES="src/tm_mad/shared/clone \

View File

@ -353,7 +353,19 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
return unless vms
if image.type_str.casecmp('backup').zero?
puts format(str, 'BACKUP OF VM', vms[0])
CLIHelper.print_header(str_h1 % 'BACKUP INFORMATION', false)
puts format(str, 'VM', vms[0])
if image.has_elements?('/IMAGE/BACKUP_INCREMENTS/INCREMENT')
puts format(str, 'TYPE', 'INCREMENTAL')
puts
CLIHelper.print_header('BACKUP INCREMENTS', false)
format_backup_increments(image)
else
puts format(str, 'TYPE', 'FULL')
end
else
puts
CLIHelper.print_header('VIRTUAL MACHINES', false)
@ -418,6 +430,50 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
table.show(image_snapshots)
end
# Prints the BACKUP_INCREMENTS of a backup Image as a CLI table, one row
# per increment: ID, parent ID, type, size, creation date and source.
#
# @param image [OpenNebula::Image] backup image with increment information
def format_backup_increments(image)
    table=CLIHelper::ShowTable.new(nil, self) do
        column :ID, 'Increment ID', :size=>3 do |d|
            d['ID']
        end

        column :PID, 'Parent increment ID', :size=>3 do |d|
            d['PARENT_ID']
        end

        # Single letter type: F (FULL) or I (INCREMENT)
        column :TYPE, 'T', :size=>1 do |d|
            d['TYPE'][0]
        end

        # Human readable size; SIZE is stored in MB ('M' base unit)
        column :SIZE, '', :left, :size=>8 do |d|
            if d['SIZE']
                OpenNebulaHelper.unit_to_str(
                    d['SIZE'].to_i,
                    {},
                    'M'
                )
            else
                '-'
            end
        end

        column :DATE, 'Creation date', :size=>15 do |d|
            OpenNebulaHelper.time_to_str(d['DATE'])
        end

        column :SOURCE, 'Backup source', :left, :size=>37 do |d|
            d['SOURCE']
        end

        default :ID, :PID, :TYPE, :SIZE, :DATE, :SOURCE
    end

    ihash = image.to_hash

    # A single INCREMENT is parsed as a Hash; wrap and flatten so the
    # table always receives an Array of rows
    increments = [ihash['IMAGE']['BACKUP_INCREMENTS']['INCREMENT']].flatten

    table.show(increments)
end
class << self
def create_image_variables(options, name)

View File

@ -251,6 +251,12 @@ CommandParser::CmdParser.new(ARGV) do
:description => 'Select PCI device by its class ID'
}
RESET_BACKUP = {
:name => 'reset',
:large => '--reset',
:description => 'Creates a new backup image, from a new full backup (only for incremental)'
}
OpenNebulaHelper::TEMPLATE_OPTIONS_VM.delete_if do |v|
%w[as_gid as_uid].include?(v[:name])
end
@ -1520,7 +1526,8 @@ CommandParser::CmdParser.new(ARGV) do
command :backup,
backup_vm_desc,
:vmid,
:options => [OneDatastoreHelper::DATASTORE,
:options => [RESET_BACKUP,
OneDatastoreHelper::DATASTORE,
OneVMHelper::SCHEDULE,
OneVMHelper::WEEKLY,
OneVMHelper::MONTHLY,
@ -1533,6 +1540,8 @@ CommandParser::CmdParser.new(ARGV) do
exit(-1)
end
reset = options[:reset] == true
if !options[:schedule].nil?
options[:args] = options[:datastore]
@ -1540,7 +1549,7 @@ CommandParser::CmdParser.new(ARGV) do
else
helper.perform_action(args[0], options, 'Backup') do |vm|
rc = vm.backup(options[:datastore])
rc = vm.backup(options[:datastore], reset)
if OpenNebula.is_error?(rc)
STDERR.puts "Error creating VM backup: #{rc.message}"

View File

@ -47,6 +47,18 @@ ExtendedAttribute * ExtendedAttributeSet::get_attribute(int id) const
return it->second;
}
/**
 *  @return the attribute with the highest ID in the set, nullptr if empty
 */
ExtendedAttribute * ExtendedAttributeSet::last_attribute() const
{
    if ( a_set.empty() )
    {
        return nullptr;
    }

    return a_set.rbegin()->second;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -183,7 +183,8 @@ function restic_env
/DATASTORE/TEMPLATE/RESTIC_IONICE \
/DATASTORE/TEMPLATE/RESTIC_NICE \
/DATASTORE/TEMPLATE/RESTIC_BWLIMIT \
/DATASTORE/TEMPLATE/RESTIC_CONNECTIONS)
/DATASTORE/TEMPLATE/RESTIC_CONNECTIONS \
/DATASTORE/TEMPLATE/RESTIC_TMP_DIR)
SFTP_SERVER="${XPATH_ELEMENTS[j++]}"
SFTP_USER="${XPATH_ELEMENTS[j++]:-oneadmin}"
@ -193,6 +194,7 @@ function restic_env
NICE="${XPATH_ELEMENTS[j++]}"
BWLIMIT="${XPATH_ELEMENTS[j++]}"
CONNECTIONS="${XPATH_ELEMENTS[j++]}"
TMP_DIR="${XPATH_ELEMENTS[j++]}"
export RESTIC_REPOSITORY="sftp:${SFTP_USER}@${SFTP_SERVER}:${BASE_PATH}"
export RESTIC_PASSWORD="${PASSWORD}"
@ -203,6 +205,12 @@ function restic_env
RESTIC_ONE_PRECMD="nice -n ${NICE} "
fi
if [ -z "${TMP_DIR}" ]; then
TMP_DIR="/var/tmp/"
fi
export RESTIC_TMP_DIR="${TMP_DIR}/`uuidgen`"
if [ -n "${IONICE}" ]; then
RESTIC_ONE_PRECMD="${RESTIC_ONE_PRECMD}ionice -c2 -n ${IONICE} "
fi
@ -532,7 +540,7 @@ docker://*|dockerfile://*)
command="$VAR_LOCATION/remotes/datastore/docker_downloader.sh \"$FROM\""
;;
restic://*)
#pseudo restic url restic://<datastore_id>/<snapshot_id>/<file_name>
#pseudo restic url restic://<datastore_id>/<id>:<snapshot_id>,.../<file_name>
restic_path=${FROM#restic://}
d_id=`echo ${restic_path} | cut -d'/' -f1`
s_id=`echo ${restic_path} | cut -d'/' -f2`
@ -545,7 +553,37 @@ restic://*)
exit -1
fi
command="${RESTIC_ONE_CMD} dump -q ${s_id} /${file}"
incs=(${s_id//,/ })
mkdir -p ${RESTIC_TMP_DIR}
pushd ${RESTIC_TMP_DIR}
for i in "${incs[@]}"; do
inc_id=`echo $i | cut -d':' -f1`
snap_id=`echo $i | cut -d':' -f2`
${RESTIC_ONE_CMD} dump -q ${snap_id} /${file}.${inc_id} > disk.${inc_id}
done
for i in `ls disk* | sort -r`; do
id=`echo $i | cut -d'.' -f2`
pid=$((id - 1))
if [ -f "disk.${pid}" ]; then
qemu-img rebase -u -F qcow2 -b "disk.${pid}" "disk.${id}"
else
qemu-img rebase -u -b '' "disk.${id}"
fi
done
qemu-img convert -O qcow2 -m 4 `ls disk* | sort -r | head -1` disk.qcow2
command="cat `realpath disk.qcow2`"
clean_command="rm -rf ${RESTIC_TMP_DIR}"
popd
;;
rsync://*)
# rsync://<ds_id>/<vm_id>/<backup_id>/<file>
@ -628,3 +666,8 @@ fi
if [ "$TO" != "-" ]; then
unarchive "$TO"
fi
# Perform any clean operation
if [ -n "${clean_command}" ]; then
eval "$clean_command"
fi

View File

@ -2536,7 +2536,7 @@ int DispatchManager::detach_sg(int vid, int nicid, int sgid,
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int DispatchManager::backup(int vid, int backup_ds_id,
int DispatchManager::backup(int vid, int backup_ds_id, bool reset,
const RequestAttributes& ra, string& error_str)
{
ostringstream oss;
@ -2587,6 +2587,12 @@ int DispatchManager::backup(int vid, int backup_ds_id,
vm->backups().last_datastore_id(backup_ds_id);
if (reset)
{
vm->backups().last_increment_id(-1);
vm->backups().incremental_backup_id(-1);
}
vmm->trigger_backup(vid);
vm->set_resched(false);

View File

@ -0,0 +1,130 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2022, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "BackupIncrements.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Rebuilds the internal template and increment set from an XML node
 *  @return 0 on success, -1 otherwise
 */
int BackupIncrements::from_xml_node(const xmlNodePtr node)
{
    if ( _template.from_xml_node(node) == -1 )
    {
        return -1;
    }

    increments.init(&_template);

    return 0;
}
/* -------------------------------------------------------------------------- */
int BackupIncrements::add_increment(std::string source, long long size,
Increment::Type type)
{
VectorAttribute * va = increments.new_increment(source, size, type);
if (va == nullptr)
{
return -1;
}
_template.set(va);
return 0;
}
/* -------------------------------------------------------------------------- */
int BackupIncrements::last_increment_id()
{
Increment * li = increments.last_increment();
if ( li == nullptr )
{
return -1;
}
return li->id();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Creates a new INCREMENT attribute and adds it to the set.
 *
 *  @param source reference of the increment in the backup system
 *  @param sz size of the increment
 *  @param type FULL or INCREMENT. A FULL increment is required to start
 *  an empty chain.
 *
 *  @return the new attribute (owned by the set), nullptr on error
 */
VectorAttribute * IncrementSet::new_increment(std::string source, long long sz,
        Increment::Type type)
{
    int parent = -1;

    Increment * last = last_increment();

    if ( last == nullptr )
    {
        // An empty chain can only be started by a FULL backup
        if ( type != Increment::FULL )
        {
            return nullptr;
        }
    }
    else
    {
        parent = last->id();
    }

    // Increment IDs are consecutive, following the parent
    int iid = parent + 1;

    VectorAttribute * va = new VectorAttribute("INCREMENT");

    switch (type)
    {
        case Increment::FULL:
            va->replace("TYPE", "FULL");
            break;

        case Increment::INCREMENT:
            va->replace("TYPE", "INCREMENT");
            break;
    }

    va->replace("DATE", time(nullptr));

    va->replace("SOURCE", source);

    va->replace("SIZE", sz);

    va->replace("PARENT_ID", parent);

    va->replace("ID", iid);

    add_attribute(attribute_factory(va, iid), iid);

    return va;
}
/* -------------------------------------------------------------------------- */
long long IncrementSet::total_size()
{
long long sz = 0;
for (inc_iterator i = begin(); i != end(); ++i)
{
sz += (*i)->size();
}
return sz;
}

View File

@ -390,14 +390,16 @@ int Image::bootstrap(SqlDB * db)
string& Image::to_xml(string& xml) const
{
string template_xml;
string perms_xml;
string vm_collection_xml;
string clone_collection_xml;
string app_clone_collection_xml;
string snapshots_xml;
string lock_str;
string template_xml;
string perms_xml;
string increments_xml;
ostringstream oss;
string vm_collection_xml;
string clone_collection_xml;
string app_clone_collection_xml;
string snapshots_xml;
string lock_str;
oss <<
"<IMAGE>" <<
@ -431,6 +433,7 @@ string& Image::to_xml(string& xml) const
app_clone_collection.to_xml(app_clone_collection_xml) <<
obj_template->to_xml(template_xml) <<
snapshots.to_xml(snapshots_xml) <<
increments.to_xml(increments_xml) <<
"</IMAGE>";
xml = oss.str();
@ -525,6 +528,16 @@ int Image::from_xml(const string& xml)
content.clear();
}
ObjectXML::get_nodes("/IMAGE/BACKUP_INCREMENTS", content);
if (!content.empty())
{
rc += increments.from_xml_node(content[0]);
ObjectXML::free_nodes(content);
content.clear();
}
if (rc != 0)
{
return -1;

View File

@ -27,7 +27,8 @@ source_files=[
'ImageManagerProtocol.cc',
'ImageManager.cc',
'ImageManagerActions.cc',
'ImageTemplate.cc'
'ImageTemplate.cc',
'BackupIncrements.cc'
]
# Build library

View File

@ -16,6 +16,7 @@
#include <time.h>
#include <stdio.h>
#include "Nebula.h"
#include "LifeCycleManager.h"
#include "TransferManager.h"
#include "DispatchManager.h"
@ -1276,24 +1277,32 @@ void LifeCycleManager::trigger_attach_success(int vid)
return;
}
if ( vm->get_lcm_state() == VirtualMachine::HOTPLUG )
{
vm->clear_attach_disk();
vm->set_state(VirtualMachine::RUNNING);
vmpool->update(vm.get());
vmpool->update_search(vm.get());
}
else if ( vm->get_lcm_state() == VirtualMachine::HOTPLUG_PROLOG_POWEROFF )
if ( vm->get_lcm_state() == VirtualMachine::HOTPLUG ||
vm->get_lcm_state() == VirtualMachine::HOTPLUG_PROLOG_POWEROFF )
{
vm->log("LCM", Log::INFO, "VM Disk successfully attached.");
vm->clear_attach_disk();
// Reset incremental backup pointer. After attach a new full backup
// is needed.
if ( vm->backups().configured() )
{
vm->backups().last_increment_id(-1);
vm->backups().incremental_backup_id(-1);
}
if ( vm->get_lcm_state() == VirtualMachine::HOTPLUG )
{
vm->set_state(VirtualMachine::RUNNING);
}
else
{
dm->trigger_poweroff_success(vid);
}
vmpool->update(vm.get());
vmpool->update_search(vm.get());
dm->trigger_poweroff_success(vid);
}
else
{
@ -2638,6 +2647,99 @@ void LifeCycleManager::trigger_resize_failure(int vid)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Creates a new Image of type BACKUP to register a finished VM backup.
 *  Image SOURCE and SIZE are taken from the active backup operation
 *  (LAST_BACKUP_ID / LAST_BACKUP_SIZE) stored in the VM BACKUP_CONFIG.
 *
 *  @param vm the virtual machine that was backed up
 *  @param msg error reason, set on failure
 *
 *  @return the ID of the new Image, -1 on error
 */
static int create_backup_image(VirtualMachine * vm, string& msg)
{
    Nebula& nd = Nebula::instance();

    auto ipool  = nd.get_ipool();
    auto dspool = nd.get_dspool();

    int i_id;

    string error_str;

    auto& backups = vm->backups();

    /* ------------------------------------------------------------------ */
    /* Get datastore backup information                                   */
    /* ------------------------------------------------------------------ */
    string ds_name, ds_mad, ds_data;

    Datastore::DatastoreType ds_type;
    Image::DiskType ds_dtype;

    // Datastore selected for the active backup operation
    int ds_id = backups.last_datastore_id();

    if (auto ds = dspool->get_ro(ds_id))
    {
        ds_name  = ds->get_name();
        ds_dtype = ds->get_disk_type();
        ds_type  = ds->get_type();
        ds_mad   = ds->get_ds_mad();

        ds->to_xml(ds_data);
    }
    else
    {
        msg = "backup datastore does not exist";
        return -1;
    }

    /* ------------------------------------------------------------------ */
    /* Create Image for the backup snapshot                               */
    /* ------------------------------------------------------------------ */
    time_t the_time;
    char tbuffer[80];

    struct tm * tinfo;

    ostringstream oss;

    time(&the_time);

    tinfo = localtime(&the_time);

    strftime(tbuffer, 80, "%d-%b %H.%M.%S", tinfo); //18-Jun 08.30.15

    // Image name: "<vm_id> <timestamp>"
    oss << vm->get_oid() << " " << tbuffer;

    auto itmp = make_unique<ImageTemplate>();

    itmp->add("NAME", oss.str());
    itmp->add("SOURCE", backups.last_backup_id());
    itmp->add("SIZE", backups.last_backup_size());
    itmp->add("FORMAT", "raw");
    itmp->add("VM_ID", vm->get_oid());
    itmp->add("TYPE", Image::type_to_str(Image::BACKUP));

    // NOTE(review): 0177 appears to be the umask applied to the image
    // permissions (owner-only access) -- confirm against ImagePool::allocate
    int rc = ipool->allocate(vm->get_uid(),
                             vm->get_gid(),
                             vm->get_uname(),
                             vm->get_gname(),
                             0177,
                             move(itmp),
                             ds_id,
                             ds_name,
                             ds_dtype,
                             ds_data,
                             ds_type,
                             ds_mad,
                             "-",
                             "",
                             -1,
                             &i_id,
                             error_str);

    if ( rc < 0 )
    {
        msg = "backup image allocate error: " + error_str;
        return -1;
    }

    return i_id;
}
/* -------------------------------------------------------------------------- */
void LifeCycleManager::trigger_backup_success(int vid)
{
trigger([this, vid] {
@ -2648,32 +2750,34 @@ void LifeCycleManager::trigger_backup_success(int vid)
return;
}
time_t the_time;
char tbuffer[80];
/* ------------------------------------------------------------------ */
/* State update and Quota values (worst case) */
/* ------------------------------------------------------------------ */
string error;
ostringstream oss;
string ds_name, ds_mad, ds_data;
Datastore::DatastoreType ds_type;
Image::DiskType ds_dtype;
int i_id;
string error_str;
// Store quota values
Template ds_deltas;
long long backup_size = 0;
int vm_uid = vm->get_uid();
int vm_gid = vm->get_gid();
auto& backups = vm->backups();
vm->max_backup_size(ds_deltas);
int ds_id = backups.last_datastore_id();
int incremental_id = backups.incremental_backup_id();
Backups::Mode mode = backups.mode();
long long reserved_sz = vm->backup_size(ds_deltas);
long long real_sz = 0;
one_util::str_cast(backups.last_backup_size(), real_sz);
ds_deltas.add("DATASTORE", backups.last_datastore_id());
one_util::str_cast(backups.last_backup_size(), backup_size);
if (mode == Backups::FULL || incremental_id == -1)
{
ds_deltas.add("IMAGES", 1);
}
switch(vm->get_lcm_state())
{
@ -2687,97 +2791,84 @@ void LifeCycleManager::trigger_backup_success(int vid)
break;
default:
vm->log("LCM",Log::ERROR,"backup_success, VM in a wrong state");
vm->log("LCM", Log::ERROR, "backup_success, VM in wrong state");
vm.reset();
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
return;
}
/* ------------------------------------------------------------------ */
/* Get datastore backup information */
/* Create Backup image if needed */
/* ------------------------------------------------------------------ */
int ds_id = backups.last_datastore_id();
int image_id = -1;
if (auto ds = dspool->get_ro(ds_id))
std::set<int> delete_ids;
if (mode == Backups::FULL || incremental_id == -1)
{
ds_name = ds->get_name();
ds_dtype = ds->get_disk_type();
ds_type = ds->get_type();
ds_mad = ds->get_ds_mad();
image_id = create_backup_image(vm.get(), error);
ds->to_xml(ds_data);
}
else
{
vm->log("LCM", Log::ERROR, "backup_success, "
"backup datastore does not exist");
if ( image_id == -1 )
{
goto error_image_create;
}
vmpool->update(vm.get());
backups.add(image_id);
vm.reset();
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
return;
backups.remove_last(delete_ids);
}
/* ------------------------------------------------------------------ */
/* Create Image for the backup snapshot, add it to the VM */
/* Update backup information for increments */
/* ------------------------------------------------------------------ */
time(&the_time);
struct tm * tinfo = localtime(&the_time);
strftime (tbuffer, 80, "%d-%b %H.%M.%S", tinfo); //18-Jun 08.30.15
oss << vm->get_oid() << " " << tbuffer;
auto itmp = make_unique<ImageTemplate>();
itmp->add("NAME", oss.str());
itmp->add("SOURCE", backups.last_backup_id());
itmp->add("SIZE", backups.last_backup_size());
itmp->add("FORMAT", "raw");
itmp->add("VM_ID", vm->get_oid());
itmp->add("TYPE", Image::type_to_str(Image::BACKUP));
int rc = ipool->allocate( vm->get_uid(), vm->get_gid(), vm->get_uname(),
vm->get_gname(), 0177, move(itmp), ds_id, ds_name, ds_dtype,
ds_data, ds_type, ds_mad, "-", "", -1, &i_id, error_str);
if ( rc < 0 )
if (mode == Backups::INCREMENT)
{
vm->log("LCM",Log::ERROR,"backup_success, "
"backup image allocate error: " + error_str);
Increment::Type itype;
vmpool->update(vm.get());
if (incremental_id == -1)
{
backups.incremental_backup_id(image_id);
itype = Increment::FULL;
}
else
{
image_id = backups.incremental_backup_id();
itype = Increment::INCREMENT;
}
vm.reset();
if (auto image = ipool->get(image_id))
{
int rc = image->add_increment(backups.last_backup_id(), real_sz, itype);
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
if ( rc == -1)
{
error = "cannot add increment to backup image";
goto error_increment_update;
}
return;
backups.last_increment_id(image->last_increment_id());
ipool->update(image.get());
}
else
{
error = "backup image does not exists";
goto error_increment_update;
}
}
std::set<int> iids;
backups.add(i_id);
backups.remove_last(iids);
backups.last_backup_clear();
vmpool->update(vm.get());
if ( iids.size() > 0 )
if ( delete_ids.size() > 0 )
{
oss.str("");
ostringstream oss;
oss << "Removing backup snapshots:";
for (int i : iids)
for (int i : delete_ids)
{
oss << " " << i;
}
@ -2790,32 +2881,54 @@ void LifeCycleManager::trigger_backup_success(int vid)
/* ------------------------------------------------------------------ */
/* Add image to the datastore and forget keep_last backups */
/* ------------------------------------------------------------------ */
if ( auto ds = dspool->get(ds_id) )
if (mode == Backups::FULL || incremental_id == -1)
{
ds->add_image(i_id);
dspool->update(ds.get());
}
for (int i : iids)
{
if ( imagem->delete_image(i, error_str) != 0 )
if ( auto ds = dspool->get(ds_id) )
{
oss.str("");
oss << "backup_success, cannot remove VM backup " << i
<< " : " << error_str;
ds->add_image(image_id);
NebulaLog::error("LCM", oss.str());
dspool->update(ds.get());
}
for (int i : delete_ids)
{
if ( imagem->delete_image(i, error) != 0 )
{
ostringstream oss;
oss << "backup_success, cannot remove VM backup " << i
<< " : " << error;
NebulaLog::error("LCM", oss.str());
}
}
}
// Update quotas, count real size of the backup
long long reserved_size{0};
ds_deltas.get("SIZE", reserved_size);
ds_deltas.replace("SIZE", reserved_size - backup_size);
ds_deltas.add("IMAGES", 0);
/* ------------------------------------------------------------------ */
/* Update quotas, count real size of the backup */
/* ------------------------------------------------------------------ */
ds_deltas.replace("SIZE", reserved_sz - real_sz);
ds_deltas.replace("IMAGES", 0);
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
return;
error_increment_update:
if (incremental_id == -1)
{
backups.incremental_backup_id(-1);
backups.del(image_id);
}
error_image_create:
vm->log("LCM", Log::ERROR, "backup_success, " + error);
vmpool->update(vm.get());
vm.reset();
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
return;
});
}
@ -2849,9 +2962,17 @@ void LifeCycleManager::trigger_backup_failure(int vid)
vm_uid = vm->get_uid();
vm_gid = vm->get_gid();
vm->max_backup_size(ds_deltas);
int incremental_id = vm->backups().incremental_backup_id();
Backups::Mode mode = vm->backups().mode();
vm->backup_size(ds_deltas);
ds_deltas.add("DATASTORE", vm->backups().last_datastore_id());
if (mode == Backups::FULL || incremental_id == -1)
{
ds_deltas.add("IMAGES", 1);
}
vm->backups().last_backup_clear();
vmpool->update(vm.get());

View File

@ -790,8 +790,8 @@ module OpenNebula
# @param ds_id [Integer] Id of the datastore to save the backup
# @return [Integer, OpenNebula::Error] ID of the resulting BACKUP image
# in case of success, Error otherwise.
def backup(ds_id)
return @client.call(VM_METHODS[:backup], @pe_id, ds_id)
# Initiates the backup of the VM through the XML-RPC API.
#
# @param ds_id [Integer] Id of the datastore to save the backup
# @param reset [Boolean] when true, closes the current incremental chain
#        and forces a new FULL backup
# @return [Integer, OpenNebula::Error] ID of the resulting BACKUP image
#         in case of success, Error otherwise.
def backup(ds_id, reset = false)
    return @client.call(VM_METHODS[:backup], @pe_id, ds_id, reset)
end
########################################################################

View File

@ -3394,7 +3394,7 @@ void VirtualMachineUpdateConf::request_execute(
}
}
rc = vm->updateconf(uc_tmpl.get(), att.resp_msg, update_type == 1 ? true : false);
rc = vm->updateconf(uc_tmpl.get(), att.resp_msg, update_type == 1);
// rc = -1 (error), 0 (context changed), 1 (no context change)
if ( rc == -1 )
@ -3822,13 +3822,22 @@ void VirtualMachineBackup::request_execute(
PoolObjectAuth ds_perms;
Template quota_tmpl;
Backups::Mode mode;
int li_id;
ostringstream oss;
// ------------------------------------------------------------------------
// Get request parameters
// ------------------------------------------------------------------------
int vm_id = xmlrpc_c::value_int(paramList.getInt(1));
int backup_ds_id = xmlrpc_c::value_int(paramList.getInt(2));
int vm_id = xmlrpc_c::value_int(paramList.getInt(1));
int backup_ds_id = xmlrpc_c::value_int(paramList.getInt(2));
bool reset = false;
if ( paramList.size() > 3 )
{
reset = xmlrpc_c::value_boolean(paramList.getBoolean(3));
}
// ------------------------------------------------------------------------
// Get VM & Backup Information
@ -3837,7 +3846,10 @@ void VirtualMachineBackup::request_execute(
{
vm->get_permissions(vm_perms);
vm->max_backup_size(quota_tmpl);
vm->backup_size(quota_tmpl);
mode = vm->backups().mode();
li_id = vm->backups().last_increment_id();
}
else
{
@ -3885,7 +3897,11 @@ void VirtualMachineBackup::request_execute(
// after backup success notification from driver
// -------------------------------------------------------------------------
quota_tmpl.add("DATASTORE", backup_ds_id);
quota_tmpl.add("IMAGES", 1);
if ( mode == Backups::FULL || li_id == -1 || reset )
{
quota_tmpl.add("IMAGES", 1);
}
RequestAttributes att_quota(vm_perms.uid, vm_perms.gid, att);
@ -3898,7 +3914,7 @@ void VirtualMachineBackup::request_execute(
// ------------------------------------------------------------------------
// Create backup
// ------------------------------------------------------------------------
if (dm->backup(vm_id, backup_ds_id, att, att.resp_msg) < 0)
if (dm->backup(vm_id, backup_ds_id, reset, att, att.resp_msg) < 0)
{
quota_rollback(&quota_tmpl, Quotas::DATASTORE, att_quota);

View File

@ -20,6 +20,39 @@ require 'CommandManager'
module TransferManager
# This class includes methods to manage backup images
class BackupImage

    # @param action_xml [String] DS_DRIVER_ACTION_DATA document with the
    #        IMAGE information of the backup
    def initialize(action_xml)
        @action = REXML::Document.new(action_xml).root

        @increments = {}

        prefix = '/DS_DRIVER_ACTION_DATA/IMAGE'

        # Incremental backups include a BACKUP_INCREMENTS list; map each
        # increment id to its backup source reference
        @action.each_element("#{prefix}/BACKUP_INCREMENTS/INCREMENT") do |inc|
            id = inc.elements['ID'].text.to_i

            @increments[id] = inc.elements['SOURCE'].text
        end

        # Full backups have no increments; treat SOURCE as increment 0
        return unless @increments.empty?

        @increments[0] = @action.elements["#{prefix}/SOURCE"].text
    end

    # @return [String] source of the most recent increment. Uses the
    #         highest id instead of relying on XML document order.
    def last
        @increments[@increments.keys.max]
    end

    # @return [Array<String>] sources of all increments, ordered by id
    def snapshots
        @increments.sort.map {|_, v| v }
    end

    # @return [String] <increment_id>:<source>,... chain ordered by id,
    #         as expected by the backup downloader
    def chain
        @increments.sort.map {|k, v| "#{k}:#{v}" }.join(',')
    end

end
# This class includes methods to generate a recovery VM template based
# on the XML stored in a Backup
#
@ -72,7 +105,7 @@ module TransferManager
@vm = OpenNebula::VirtualMachine.new(xml, nil)
@base_name = "#{@vm.id}-#{opts[:backup_id]}"
@base_url = "#{opts[:proto]}://#{opts[:ds_id]}/#{opts[:backup_id]}"
@base_url = "#{opts[:proto]}://#{opts[:ds_id]}/#{opts[:chain]}"
@ds_id = opts[:ds_id]

717
src/tm_mad/lib/backup_qcow2.rb Executable file
View File

@ -0,0 +1,717 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'json'
require 'open3'
require 'rexml/document'
require 'base64'
require 'getoptlong'
require_relative './kvm'
#---------------------------------------------------------------------------
# Helper module to execute commands
#---------------------------------------------------------------------------
module Command

    # Executes a command appending the rendered options and returns its
    # standard output.
    #
    # @param command [String] base command (e.g. 'qemu-img convert')
    # @param args [String] trailing positional arguments
    # @param opts [Hash] options rendered as flags; Array values repeat
    #        the same flag once per element
    #
    # @raise [StandardError] if the command exits with non-zero status
    # @return [String] the command stdout
    def cmd(command, args, opts = {})
        opts.each do |key, value|
            if value.is_a?(Array)
                value.each {|v| command << render_opt(key, v) }
            else
                command << render_opt(key, value)
            end
        end

        # $debug holds a log file path when --debug is set
        File.write($debug, "#{command} #{args}\n", mode: 'a') if $debug

        out, err, rc = Open3.capture3("#{command} #{args}")

        if rc.exitstatus != 0
            raise StandardError, "Error executing '#{command} #{args}':\n#{out} #{err}"
        end

        out
    end

    # Renders a command line option:
    #   - single letter names use short form (' -f value')
    #   - longer names use long form (' --force-share'); '_' becomes '-'
    #   - names ending in '=' are joined to the value without a space
    #   - empty/nil values render the bare flag
    #
    # @param name [Symbol] option name
    # @param value [Object] option value; converted with to_s so non-String
    #        values (e.g. Integer) are accepted
    # @return [String] the rendered option, including a leading space
    def render_opt(name, value)
        opt = if name.length == 1
                  " -#{name.to_s.gsub('_', '-')}"
              else
                  " --#{name.to_s.gsub('_', '-')}"
              end

        value = value.to_s unless value.nil?

        if value && !value.empty?
            opt << ' ' if name[-1] != '='
            opt << value
        end

        opt
    end

end
#-------------------------------------------------------------------------------
# Setup an NBD server to pull changes, an optional map can be provided
#-------------------------------------------------------------------------------
module Nbd

    @@server = nil

    # Starts a read-only qemu-nbd server exporting the given image.
    #
    # @param file [String] path to the qcow2 image to export
    # @param map [String] optional dirty bitmap to expose (-B)
    def self.start_nbd(file, map = '')
        return if @@server != nil

        @@socket = "#{File.realpath(file)}.socket"
        @@server = fork {
            args = ['-r', '-k', @@socket, '-f', 'qcow2', '-t']
            args << '-B' << map unless map.empty?

            # Append the image as the positional argument. The original
            # `args = file` reassignment dropped every flag built above.
            args << file

            exec('qemu-nbd', *args)
        }

        sleep(1) #TODO inotify or poll for @@socket
    end

    # Stops the NBD server and removes its unix socket
    def self.stop_nbd
        Process.kill('QUIT', @@server)
        Process.waitpid(@@server)

        File.unlink(@@socket)

        @@server = nil
    end

    # @return [String] NBD URI for the exported image socket
    def self.uri
        "nbd+unix:///?socket=#{@@socket}"
    end

end
# ------------------------------------------------------------------------------
# This class abstracts the information and several methods to operate over
# disk images files
# ------------------------------------------------------------------------------
class QemuImg

    include Command

    # @param path [String] path to the image file. Resolved to its real
    #        path when the file already exists.
    def initialize(path)
        @path = path
        @_info = nil

        @path = File.realpath(path) if File.exist?(path)
    end

    #---------------------------------------------------------------------------
    # qemu-img command methods
    #
    # Defines one instance method per qemu-img subcommand. Each method takes
    # a trailing argument string and an options hash rendered into flags by
    # Command#render_opt, e.g. img.convert('/dst', :O => 'qcow2')
    #---------------------------------------------------------------------------
    QEMU_IMG_COMMANDS = %w[convert create rebase info bitmap]

    QEMU_IMG_COMMANDS.each do |command|
        define_method(command.to_sym) do |args = '', opts|
            cmd("qemu-img #{command}", "#{@path} #{args}", opts)
        end
    end

    #---------------------------------------------------------------------------
    # Image information methods
    #---------------------------------------------------------------------------

    # Lazy access to the 'qemu-img info' JSON document for this image. The
    # parsed output is cached after the first call.
    #
    # @param key [String] a top level key of the info document
    def [](key)
        if !@_info
            out = info(:output => 'json', :force_share => '')

            @_info = JSON.parse(out)
        end

        @_info[key]
    end

    #---------------------------------------------------------------------------
    # Pull changes since last checkpoint through the NBD server in this image
    # 1. Get list of dirty blocks
    # 2. Create increment qcow2 using NBD as backing file
    # 3. Pull changes reading (copy-on-write)
    #
    # Note: Increment files need rebase to reconstruct the increment chain
    #---------------------------------------------------------------------------
    # @param uri [String] NBD URI of the source image export
    # @param map [String] dirty bitmap name; when empty all data extents are
    #        pulled (full copy) instead of only the dirty ones
    def pull_changes(uri, map)
        exts = if !map || map.empty?
                   data_extents(uri, '')
               else
                   dirty_extents(uri, map)
               end

        # NOTE(review): create returns cmd stdout (cmd raises on failure),
        # so rc is always truthy and the [false, msg] path looks
        # unreachable — confirm intended error handling
        rc, msg = create(:f => 'qcow2', :F => 'raw', :b => uri)

        return [false, msg] unless rc

        # Reading the extents through the NBD backing file copies them
        # (copy-on-read) into this local increment file
        exts.each do |e|
            cmd('qemu-io', @path,
                :C => '',
                :c => "\"r #{e['offset']} #{e['length']}\"",
                :f => 'qcow2')
        end
    end

    private

    #---------------------------------------------------------------------------
    # Gets the dirty extent information from the given map using an NBD server
    #---------------------------------------------------------------------------
    def dirty_extents(uri, map)
        extents(uri, map, 'dirty')
    end

    # Gets the allocated data extents of the exported image
    def data_extents(uri, map)
        #TODO change for pattern include zero
        extents(uri, map, 'data')
    end

    # Queries extent metadata with nbdinfo and filters the entries whose
    # 'description' field matches (either 'dirty' or 'data')
    def extents(uri, map, description)
        opts = { :json => '' }

        # ':map=' renders as --map=<name> (no space); ':map' as bare --map
        if !map.empty?
            opts[:map=] = map
        else
            opts[:map] = ''
        end

        out = cmd('nbdinfo', uri, opts)

        bmap = JSON.parse(out)
        exts = []

        bmap.each do |e|
            next if !e || e['description'] != description

            exts << e
        end

        exts
    end

end
# ------------------------------------------------------------------------------
# This class represents a KVM domain, includes information about the associated
# OpenNebula VM
# ------------------------------------------------------------------------------
class KVMDomain

    include TransferManager::KVM
    include Command

    attr_reader :parent_id, :backup_id, :checkpoint

    #---------------------------------------------------------------------------
    # @param vm_xml [String] OpenNebula XML VM information
    # @param opts [Hash] Vm attributes:
    #   - :vm_dir VM folder (/var/lib/one/datastores/<DS_ID>/<VM_ID>)
    #---------------------------------------------------------------------------
    def initialize(vm_xml, opts = {})
        @vm  = REXML::Document.new(vm_xml).root

        @vid = @vm.elements['ID'].text
        @dom = @vm.elements['DEPLOY_ID'].text

        # Defaults correspond to a FULL backup (no previous increment)
        @backup_id  = 0
        @parent_id  = -1
        @checkpoint = false

        mode = @vm.elements['BACKUPS/BACKUP_CONFIG/MODE']

        if mode
            case mode.text
            when 'FULL'
                @backup_id  = 0
                @parent_id  = -1
                @checkpoint = false
            when 'INCREMENT'
                li = @vm.elements[
                    'BACKUPS/BACKUP_CONFIG/LAST_INCREMENT_ID'].text.to_i

                @backup_id  = li + 1
                @parent_id  = li
                @checkpoint = true
            end
        end

        @vm_dir  = opts[:vm_dir]
        @tmp_dir = "#{opts[:vm_dir]}/tmp"
        @bck_dir = "#{opts[:vm_dir]}/backup"

        @socket  = "#{opts[:vm_dir]}/backup.socket"

        @ongoing = false
        @frozen  = nil
    end

    # "pause" the VM before executing any FS related operation. The modes are:
    #   - NONE (no operation)
    #   - AGENT (domfsfreeze - domfsthaw)
    #   - SUSPEND (suspend - resume)
    def fsfreeze
        @frozen = begin
            @vm.elements['/VM/BACKUPS/BACKUP_CONFIG/FS_FREEZE'].text.upcase
        rescue StandardError
            'NONE'
        end

        case @frozen
        when 'AGENT'
            cmd("#{virsh} domfsfreeze", @dom)
        when 'SUSPEND'
            cmd("#{virsh} suspend", @dom)
        end
    end

    # Resumes VM operation after a fsfreeze. No-op unless fsfreeze actually
    # froze or suspended the VM (@frozen is always cleared).
    def fsthaw
        return unless @frozen

        case @frozen
        when 'AGENT'
            cmd("#{virsh} domfsthaw", @dom)
        when 'SUSPEND'
            cmd("#{virsh} resume", @dom)
        end
    ensure
        @frozen = nil
    end

    #---------------------------------------------------------------------------
    # Re-define the parent_id checkpoint if not included in the checkpoint-list.
    # If the checkpoint is not present in storage the method will fail.
    #
    # @param disks_s [String] colon separated list of disk ids to include
    #---------------------------------------------------------------------------
    def define_checkpoint(disks_s)
        return if @parent_id == -1

        #-----------------------------------------------------------------------
        # Check if the parent_id checkpoint is already defined for this domain
        #-----------------------------------------------------------------------
        out = cmd("#{virsh} checkpoint-list", @dom, :name => '')
        out.strip!

        check_ids = []

        out.each_line do |l|
            m = l.match(/(one-[0-9]+)-([0-9]+)/)
            next unless m

            check_ids << m[2].to_i
        end

        return if check_ids.include? @parent_id

        #-----------------------------------------------------------------------
        # Try to re-define checkpoint, will fail if not present in storage.
        # Can be queried using qemu-monitor
        #
        # out = cmd("#{virsh} qemu-monitor-command", @dom,
        #           :cmd => '{"execute": "query-block","arguments": {}}')
        #-----------------------------------------------------------------------
        disks = disks_s.split ':'
        tgts  = []

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text

            next unless disks.include? did

            tgts << d.elements['TARGET'].text
        end

        return if tgts.empty?

        disks = '<disks>'
        tgts.each {|tgt| disks << "<disk name='#{tgt}'/>" }
        disks << '</disks>'

        checkpoint_xml = <<~EOS
            <domaincheckpoint>
              <name>one-#{@vid}-#{@parent_id}</name>
              <creationTime>#{Time.now.to_i}</creationTime>
              #{disks}
            </domaincheckpoint>
        EOS

        cpath = "#{@tmp_dir}/checkpoint.xml"

        File.open(cpath, 'w') {|f| f.write(checkpoint_xml) }

        cmd("#{virsh} checkpoint-create", @dom,
            :xmlfile => cpath, :redefine => '')
    end

    #---------------------------------------------------------------------------
    # Cleans defined checkpoints up to the last one taken for this backup
    #
    # @param all [Boolean] also delete the checkpoint of the current backup
    #---------------------------------------------------------------------------
    def clean_checkpoints(all = false)
        return unless @checkpoint

        out = cmd("#{virsh} checkpoint-list", @dom, :name => '')
        out.strip!

        out.each_line do |l|
            m = l.match(/(one-[0-9]+)-([0-9]+)/)

            # Keep the checkpoint of the ongoing backup unless all is set
            next if (!m || (!all && m[2].to_i == @backup_id))

            cmd("#{virsh} checkpoint-delete", "#{@dom} #{m[1]}-#{m[2]}")
        end
    end

    #---------------------------------------------------------------------------
    # Make a live backup for the VM.
    #
    # @param disks_s [String] colon separated ids of disks for the backup
    #---------------------------------------------------------------------------
    def backup_nbd_live(disks_s)
        disks = disks_s.split ':'

        fsfreeze

        start_backup(disks, @backup_id, @parent_id, @checkpoint)

        fsthaw

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text
            tgt = d.elements['TARGET'].text

            next unless disks.include? did

            ipath = "#{@bck_dir}/disk.#{did}.#{@backup_id}"
            idisk = QemuImg.new(ipath)

            # Full chains pull every data extent; increments pull only the
            # blocks tracked in the per-target dirty bitmap
            if @parent_id == -1
                map = ''
            else
                map = "qemu:dirty-bitmap:backup-#{tgt}"
            end

            idisk.pull_changes(mkuri(tgt), map)
        end
    ensure
        fsthaw
        stop_backup
    end

    # Full live backup. Creates an external snapshot (overlay files in the
    # tmp dir) to get a consistent copy of the disks, converted into the
    # backup folder. A first checkpoint is created for incremental mode.
    def backup_full_live(disks_s)
        disks = disks_s.split ':'
        dspec = []
        qdisk = {}

        disk_xml = '<disks>'

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text
            tgt = d.elements['TARGET'].text

            next unless disks.include? did

            overlay = "#{@tmp_dir}/overlay_#{did}.qcow2"

            File.open(overlay, 'w') {}

            dspec << "#{tgt},file=#{overlay}"

            disk_xml << "<disk name='#{tgt}'/>"

            qdisk[did] = QemuImg.new("#{@vm_dir}/disk.#{did}")
        end

        disk_xml << '</disks>'

        opts = {
            :name => "one-#{@vid}-backup",
            :disk_only => '',
            :atomic => '',
            :diskspec => dspec
        }

        checkpoint_xml = <<~EOS
            <domaincheckpoint>
              <name>one-#{@vid}-0</name>
              #{disk_xml}
            </domaincheckpoint>
        EOS

        cpath = "#{@tmp_dir}/checkpoint.xml"

        File.open(cpath, 'w') {|f| f.write(checkpoint_xml) }

        fsfreeze

        cmd("#{virsh} snapshot-create-as", @dom, opts)

        cmd("#{virsh} checkpoint-create", @dom, :xmlfile => cpath) if @checkpoint

        fsthaw

        qdisk.each do |did, disk|
            disk.convert("#{@bck_dir}/disk.#{did}.0", :m => '4', :O => 'qcow2')
        end
    ensure
        fsthaw
    end

    # Commits the overlay changes created by backup_full_live back into the
    # disk images, pivots them and deletes the libvirt snapshot metadata.
    def stop_backup_full_live(disks_s)
        disks = disks_s.split ':'

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text
            tgt = d.elements['TARGET'].text

            next unless disks.include? did

            opts = {
                :base => "#{@vm_dir}/disk.#{did}",
                :active => '',
                :pivot => '',
                :keep_relative => ''
            }

            cmd("#{virsh} blockcommit", "#{@dom} #{tgt}", opts)
        end

        cmd("#{virsh} snapshot-delete", "#{@dom}",
            :snapshotname => "one-#{@vid}-backup",
            :metadata => '')
    end

    #---------------------------------------------------------------------------
    # Make a backup for a powered-off VM. (see backup_nbd_live)
    #---------------------------------------------------------------------------
    def backup_nbd(disks_s)
        disks = disks_s.split ':'

        if @parent_id == -1
            nbd_map = ''
            map     = ''
        else
            nbd_map = "one-#{@vid}-#{@parent_id}"
            map     = "qemu:dirty-bitmap:#{nbd_map}"
        end

        dids = []

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text

            dids << did if disks.include? did
        end

        dids.each do |d|
            idisk = QemuImg.new("#{@bck_dir}/disk.#{d}.#{@backup_id}")

            Nbd.start_nbd("#{@vm_dir}/disk.#{d}", nbd_map)

            idisk.pull_changes(Nbd.uri, map)
        ensure
            Nbd.stop_nbd
        end

        #TODO Check if baking files needs bitmap
        # Record this backup as a dirty bitmap in each disk (checkpoint)
        dids.each do |d|
            idisk = QemuImg.new("#{@vm_dir}/disk.#{d}")

            idisk.bitmap("one-#{@vid}-#{@backup_id}", :add => '')
        end if @checkpoint
    end

    # Full copy of the disks of a powered-off VM into the backup folder
    def backup_full(disks_s)
        disks = disks_s.split ':'

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text

            next unless disks.include? did

            # Fix: build the source path with the disk id (did); the
            # original interpolated the REXML element (d) producing a
            # broken path
            sdisk = QemuImg.new("#{@vm_dir}/disk.#{did}")
            ddisk = "#{@bck_dir}/disk.#{did}.0"

            # Fix: pass '4' as a String; option values must respond to
            # empty? in Command#render_opt (Integer 4 raised NoMethodError)
            sdisk.convert(ddisk, :m => '4', :O => 'qcow2')

            sdisk.bitmap("one-#{@vid}-0", :add => '') if @checkpoint
        end
    end

    private

    # Generate nbd URI to query block bitmaps for a device
    def mkuri(target)
        "nbd+unix:///#{target}?socket=#{@socket}"
    end

    #---------------------------------------------------------------------------
    # Start a Backup operation on the domain (See backup_nbd_live)
    #---------------------------------------------------------------------------
    def start_backup(disks, bck_id, pid, checkpoint)
        parent = "one-#{@vid}-#{pid}"
        bname  = "one-#{@vid}-#{bck_id}"

        parent_xml = "<incremental>#{parent}</incremental>" if pid != -1

        backup_xml = <<~EOS
            <domainbackup mode='pull'>
              #{parent_xml}
              <server transport='unix' socket='#{@socket}'/>
              <disks>
        EOS

        checkpoint_xml = <<~EOS
            <domaincheckpoint>
              <name>#{bname}</name>
              <disks>
        EOS

        @vm.elements.each 'TEMPLATE/DISK' do |d|
            did = d.elements['DISK_ID'].text
            tgt = d.elements['TARGET'].text
            szm = d.elements['SIZE'].text

            next unless disks.include? did

            # Pre-create the scratch file used by the pull-mode backup job
            spath = "#{@tmp_dir}/scracth.#{did}.qcow2"

            simg = QemuImg.new(spath)
            simg.create("#{szm}M", :f => 'qcow2')

            backup_xml << <<~EOS
                <disk name='#{tgt}' backup='yes' type='file'>
                  <scratch file='#{spath}'/>
                </disk>
            EOS

            checkpoint_xml << "<disk name='#{tgt}'/>"
        end

        checkpoint_xml << <<~EOS
            </disks>
            </domaincheckpoint>
        EOS

        backup_xml << <<~EOS
            </disks>
            </domainbackup>
        EOS

        backup_path = "#{@tmp_dir}/backup.xml"
        check_path  = "#{@tmp_dir}/checkpoint.xml"

        File.open(backup_path, 'w') {|f| f.write(backup_xml) }
        File.open(check_path, 'w') {|f| f.write(checkpoint_xml) }

        opts = { :reuse_external => '', :backupxml => backup_path }
        opts[:checkpointxml] = check_path if checkpoint

        cmd("#{virsh} backup-begin", @dom, opts)

        @ongoing = true
    end

    #---------------------------------------------------------------------------
    # Stop an ongoing Backup operation on the domain
    #---------------------------------------------------------------------------
    def stop_backup
        return unless @ongoing

        cmd("#{virsh} domjobabort", @dom, {})
    ensure
        @ongoing = false
    end

end
# ------------------------------------------------------------------------------
# Command line options of the backup script
# ------------------------------------------------------------------------------
opts = GetoptLong.new(
    ['--disks','-d', GetoptLong::REQUIRED_ARGUMENT],
    ['--vmxml','-x', GetoptLong::REQUIRED_ARGUMENT],
    ['--path', '-p', GetoptLong::REQUIRED_ARGUMENT],
    ['--live', '-l', GetoptLong::NO_ARGUMENT],
    ['--debug','-v', GetoptLong::NO_ARGUMENT],
    ['--stop', '-s', GetoptLong::NO_ARGUMENT]
)

begin
    path = disks = vmxml = ''
    live = stop = false

    $debug = nil

    opts.each do |opt, arg|
        case opt
        when '--disks'
            # Colon separated list of disk ids to include in the backup
            disks = arg
        when '--path'
            # VM folder (/var/lib/one/datastores/<DS_ID>/<VM_ID>)
            path = arg
        when '--live'
            live = true
        when '--stop'
            stop = true
        when '--vmxml'
            # File with the base64 encoded OpenNebula VM XML
            vmxml = arg
        when '--debug'
            # Command trace file used by Command#cmd
            $debug = "/tmp/one.backup.#{Time.now.to_i}"
        end
    end

    vm = KVMDomain.new(Base64.decode64(File.read(vmxml)), :vm_dir => path)

    #---------------------------------------------------------------------------
    # Stop operation. Only for full backups in live mode. It blockcommits
    # changes and cleans snapshot.
    #---------------------------------------------------------------------------
    if stop
        vm.stop_backup_full_live(disks) if vm.parent_id == -1 && live

        exit(0)
    end

    #---------------------------------------------------------------------------
    # Backup operation
    #   - (live - full) Creates a snapshot to copy the disks via qemu-convert
    #     all previous defined checkpoints are cleaned.
    #   - (live - increment) starts a backup operation in libvirt and pull changes
    #     through NBD server using qemu-io copy-on-read feature
    #   - (poff - full) copy disks via qemu-convert
    #   - (poff - incremental) starts qemu-nbd server to pull changes from the
    #     last checkpoint
    #---------------------------------------------------------------------------
    if live
        if vm.parent_id == -1
            vm.clean_checkpoints(true)

            vm.backup_full_live(disks)
        else
            vm.define_checkpoint(disks)

            vm.backup_nbd_live(disks)

            vm.clean_checkpoints
        end
    else
        if vm.parent_id == -1
            vm.backup_full(disks)
        else
            vm.backup_nbd(disks)
        end
    end
rescue StandardError => e
    puts e.message

    exit(-1)
end

View File

@ -16,9 +16,10 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
# rubocop:disable Style/ClassAndModuleChildren
# rubocop:disable Style/ClassVars
module TransferManager::KVM
module TransferManager
module KVM
KVMRC = '/var/lib/one/remotes/etc/vmm/kvm/kvmrc'
@ -102,6 +103,7 @@ module TransferManager::KVM
end
end
end
end
# rubocop:enable Style/ClassAndModuleChildren
# rubocop:enable Style/ClassVars

View File

@ -61,48 +61,29 @@ include TransferManager::KVM
# BACKUP tm_mad host:remote_dir DISK_ID:...:DISK_ID deploy_id vmid dsid
vm_xml = STDIN.read
_vm_xml = STDIN.read
dir = ARGV[0].split ':'
disks = ARGV[1].split ':'
deploy_id = ARGV[2]
vmid = ARGV[3]
_dsid = ARGV[4]
dir = ARGV[0].split ':'
disks = ARGV[1]
_deploy_id = ARGV[2]
_vmid = ARGV[3]
_dsid = ARGV[4]
rhost = dir[0]
rdir = dir[1]
xml_doc = REXML::Document.new(vm_xml)
vm = xml_doc.root
tmp_dir = "#{rdir}/tmp"
bck_dir = "#{rdir}/backup"
cmds = ''
# TODO: limit BW?
vm.elements.each 'TEMPLATE/DISK' do |d|
did = d.elements['DISK_ID'].text
next unless disks.include? did
tgt = d.elements['TARGET'].text
base = "#{rdir}/disk.#{did}"
cmds << "#{virsh} blockcommit #{deploy_id} #{tgt} --active --pivot"\
" --base #{base} --keep-relative\n"
end
qcow2_util = '/var/tmp/one/tm/lib/backup_qcow2.rb'
post_script = <<~EOS
set -ex -o pipefail
# --------------------------------------------------------------------
# Commit changes since backup, pivot disks and delete libvirt snapshot
# Commit changes, pivot disks and delete libvirt snapshot (only full)
# --------------------------------------------------------------------
#{cmds}
#{virsh} snapshot-delete #{deploy_id} --metadata one-#{vmid}-backup
#{qcow2_util} -s -l -d "#{disks}" -x #{bck_dir}/vm.xml -p #{rdir}
# ----------------------
# Cleanup backup folders

View File

@ -61,7 +61,7 @@ require 'rexml/document'
vm_xml = STDIN.read
dir = ARGV[0].split ':'
disks = ARGV[1].split ':'
disks = ARGV[1]
_deployid = ARGV[2]
_vmid = ARGV[3]
_dsid = ARGV[4]
@ -69,24 +69,8 @@ _dsid = ARGV[4]
rhost = dir[0]
rdir = dir[1]
xml_doc = REXML::Document.new(vm_xml)
vm = xml_doc.root
bck_dir = "#{rdir}/backup"
convert = ''
# TODO: Make compression configurable. Does it intefere with deduplication?
vm.elements.each 'TEMPLATE/DISK' do |d|
did = d.elements['DISK_ID'].text
next unless disks.include? did
dsrc = "#{rdir}/disk.#{did}"
ddst = "#{bck_dir}/disk.#{did}"
convert << "qemu-img convert -m 4 -O qcow2 #{dsrc} #{ddst}; "
end
bck_dir = "#{rdir}/backup"
qcow2_util = '/var/tmp/one/tm/lib/backup_qcow2.rb'
pre_script = <<~EOS
set -ex -o pipefail
@ -103,7 +87,7 @@ pre_script = <<~EOS
# -------------------
# Convert and flatten
# -------------------
#{convert}
#{qcow2_util} -d "#{disks}" -x #{bck_dir}/vm.xml -p #{rdir}
EOS
rc = TransferManager::Action.ssh('prebackup',

View File

@ -64,44 +64,19 @@ include TransferManager::KVM
#-------------------------------------------------------------------------------
vm_xml = STDIN.read
dir = ARGV[0].split ':'
disks = ARGV[1].split ':'
deploy_id = ARGV[2]
vmid = ARGV[3]
_dsid = ARGV[4]
dir = ARGV[0].split ':'
disks = ARGV[1]
_deploy_id = ARGV[2]
_vmid = ARGV[3]
_dsid = ARGV[4]
rhost = dir[0]
rdir = dir[1]
xml_doc = REXML::Document.new(vm_xml)
vm = xml_doc.root
tmp_dir = "#{rdir}/tmp"
bck_dir = "#{rdir}/backup"
dskspec = ''
touch = ''
convert = ''
# TODO: Make compression configurable. Does it intefere with deduplication?
vm.elements.each 'TEMPLATE/DISK' do |d|
did = d.elements['DISK_ID'].text
next unless disks.include? did
tgt = d.elements['TARGET'].text
file = "#{tmp_dir}/overlay_#{did}.qcow2"
dskspec << "--diskspec #{tgt},file=#{file} "
touch << "touch #{file}; "
dsrc = "#{rdir}/disk.#{did}"
ddst = "#{bck_dir}/disk.#{did}"
convert << "qemu-img convert -m 4 -O qcow2 #{dsrc} #{ddst}; "
end
freeze, thaw = fsfreeze(vm, deploy_id)
qcow2_util = '/var/tmp/one/tm/lib/backup_qcow2.rb'
pre_script = <<~EOS
set -ex -o pipefail
@ -119,25 +94,14 @@ pre_script = <<~EOS
echo "#{Base64.encode64(vm_xml)}" > #{bck_dir}/vm.xml
#{touch}
# --------------------------------------
# Create external snapshots for VM disks
# Create backup live
# --------------------------------------
#{freeze}
#{virsh} snapshot-create-as --domain #{deploy_id} --name one-#{vmid}-backup \
--disk-only --atomic #{dskspec}
#{thaw}
# -------------------
# Convert and flatten
# -------------------
#{convert}
#{qcow2_util} -l -d "#{disks}" -x #{bck_dir}/vm.xml -p #{rdir}
EOS
rc = TransferManager::Action.ssh('prebackup_live', :host => rhost,
rc = TransferManager::Action.ssh('prebackup_live',
:host => rhost,
:cmds => pre_script,
:nostdout => false,
:nostderr => false)

View File

@ -46,6 +46,14 @@ bool Backups::do_volatile() const
return false;
}
/**
 *  @return the backup mode (FULL/INCREMENT) stored in the MODE attribute
 *  of BACKUP_CONFIG, converted through str_to_mode
 */
Backups::Mode Backups::mode() const
{
    string mode_str;

    config.get("MODE", mode_str);

    return str_to_mode(mode_str);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
@ -109,7 +117,7 @@ int Backups::from_xml(const ObjectXML* xml)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Backups::parse(std::string& error_str, Template *tmpl)
int Backups::parse(Template *tmpl, bool can_increment, std::string& error_str)
{
vector<VectorAttribute *> cfg_a;
@ -127,7 +135,13 @@ int Backups::parse(std::string& error_str, Template *tmpl)
if ( cfg == 0 )
{
error_str = "Internal error parsing BACKUP_CONFIG attribute.";
goto error;
for (auto &i : cfg_a)
{
delete i;
}
return -1;
}
/* ---------------------------------------------------------------------- */
@ -135,6 +149,7 @@ int Backups::parse(std::string& error_str, Template *tmpl)
/* - KEEP_LAST */
/* - BACKUP_VOLATILE */
/* - FSFREEZE */
/* - MODE */
/* ---------------------------------------------------------------------- */
if (cfg->vector_value("KEEP_LAST", iattr) == 0)
{
@ -175,19 +190,33 @@ int Backups::parse(std::string& error_str, Template *tmpl)
config.replace("FS_FREEZE", "NONE");
}
if (!can_increment) //Only FULL backups for this VM
{
config.replace("INCREMENTAL_BACKUP_ID", -1);
config.replace("LAST_INCREMENT_ID", -1);
config.replace("MODE", mode_to_str(FULL));
}
else
{
sattr = cfg->vector_value("MODE");
Mode new_mode = str_to_mode(sattr);
// Reset incremental backup pointers if mode changed to/from FULL
if (new_mode != INCREMENT || mode() != INCREMENT)
{
config.replace("INCREMENTAL_BACKUP_ID", -1);
config.replace("LAST_INCREMENT_ID", -1);
}
config.replace("MODE", mode_to_str(new_mode));
}
for (auto &i : cfg_a)
{
delete i;
}
return 0;
error:
for (auto &i : cfg_a)
{
delete i;
}
return -1;
}

View File

@ -971,16 +971,6 @@ int VirtualMachine::insert(SqlDB * db, string& error_str)
goto error_vrouter;
}
// ------------------------------------------------------------------------
// Parse the backup attribute
// ------------------------------------------------------------------------
rc = _backups.parse(error_str, user_obj_template.get());
if ( rc != 0 )
{
goto error_backup;
}
// ------------------------------------------------------------------------
// Get network leases
// ------------------------------------------------------------------------
@ -1027,6 +1017,18 @@ int VirtualMachine::insert(SqlDB * db, string& error_str)
goto error_boot_order;
}
// ------------------------------------------------------------------------
// Parse the backup attribute. It assumes volatile disks will take part of
// the backups
// ------------------------------------------------------------------------
rc = _backups.parse(user_obj_template.get(), disks.backup_increment(true),
error_str);
if ( rc != 0 )
{
goto error_backup;
}
// -------------------------------------------------------------------------
// Parse the context & requirements
// -------------------------------------------------------------------------
@ -1117,20 +1119,11 @@ int VirtualMachine::insert(SqlDB * db, string& error_str)
return 0;
error_update:
goto error_rollback;
error_boot_order:
goto error_rollback;
error_context:
goto error_rollback;
error_requirements:
goto error_rollback;
error_graphics:
goto error_rollback;
error_backup:
error_rollback:
release_disk_images(quotas, true);
@ -1171,7 +1164,6 @@ error_os:
error_pci:
error_defaults:
error_vrouter:
error_backup:
error_public:
error_name:
error_common:
@ -3033,12 +3025,18 @@ int VirtualMachine::updateconf(VirtualMachineTemplate* tmpl, string &err,
}
// -------------------------------------------------------------------------
// Parse backup configuration
// Parse backup configuration (if not doing a backup). Uses current value of
// BACKUP_VOLATILE attribute.
// -------------------------------------------------------------------------
if ( _backups.parse(err, tmpl) != 0 )
if ( lcm_state != BACKUP && lcm_state != BACKUP_POWEROFF)
{
NebulaLog::log("ONE",Log::ERROR, err);
return -1;
bool increment = disks.backup_increment(_backups.do_volatile());
if ( _backups.parse(tmpl, increment, err) != 0 )
{
NebulaLog::log("ONE",Log::ERROR, err);
return -1;
}
}
encrypt();

View File

@ -1533,7 +1533,7 @@ void VirtualMachineDisks::delete_non_persistent_snapshots(Template &vm_quotas,
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineDisks::backup_size(Template &ds_quotas, bool do_volatile)
long long VirtualMachineDisks::backup_size(Template &ds_quotas, bool do_volatile)
{
long long size = 0;
@ -1556,6 +1556,36 @@ void VirtualMachineDisks::backup_size(Template &ds_quotas, bool do_volatile)
}
ds_quotas.add("SIZE", size);
return size;
}
/* -------------------------------------------------------------------------- */
bool VirtualMachineDisks::backup_increment(bool do_volatile)
{
for (const auto disk : *this)
{
string type = disk->vector_value("TYPE");
one_util::toupper(type);
if ((type == "SWAP") || ((type == "FS") && !do_volatile))
{
continue;
}
string format = disk->vector_value("FORMAT");
one_util::toupper(format);
if (format != "QCOW2")
{
return false;
}
}
return true;
}
/* -------------------------------------------------------------------------- */

View File

@ -246,7 +246,8 @@ std::map<std::string,std::vector<std::string>> VirtualMachineTemplate::UPDATECON
"FS_FREEZE",
"KEEP_LAST",
"BACKUP_VOLATILE",
"FREQUENCY_SECONDS"}
"FREQUENCY_SECONDS",
"MODE"}
}
};