
F #6053: In-place restore of VM backups (shared and ssh drivers)

This commit implements the in-place restore of VM backups. Selected VM disks will
be replaced with the specified backup:

* A new API call has been added to the XML-RPC API (`one.vm.restore`) with
  the following arguments (a usage sketch follows this list):
    - VM ID of the VM to be restored; the VM needs to be in **poweroff** state
    - IMAGE ID of the backup to restore
    - INCREMENT ID, only for incremental backups, the increment to use
      (defaults to -1 to use the last increment available)
    - DISK ID of the disk to restore (defaults to -1 to restore all VM
      disks)

* Datastore drivers need to implement a new operation, `ls`. This
  operation takes the VM, the image information of the backup and the
  datastore information, and returns the restore URLs for the disks in
  the backup (see the output sketch after this list).

* This commit includes the implementation for the qcow2 and ssh drivers;
  ceph will be implemented in a separate PR. The new driver action is
  `restore host:vm_dir vm_id img_id inc_id disk_id`

* The restore operation is performed in a new state `PROLOG_RESTORE`,
  rendered as `RESTORE` (`rest` in short form). The state has been added to
  Sunstone.
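
As a usage sketch (illustrative only): the new call can be issued through the
Ruby OCA binding added in this commit; the equivalent CLI command is
`onevm restore <vmid> <imageid> [--increment <id>] [--disk-id <id>]`. The VM
and backup Image IDs below are hypothetical:

    require 'opennebula'

    client = OpenNebula::Client.new
    vm     = OpenNebula::VirtualMachine.new_with_id(42, client) # hypothetical VM ID

    # restore(img_id, inc_id, disk_id): -1 = last increment / all disks
    rc = vm.restore(7, -1, -1)                                  # hypothetical Image ID

    STDERR.puts "Error restoring: #{rc.message}" if OpenNebula.is_error?(rc)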
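
The `ls` contract, as implemented by the rsync driver below, can be sketched as
follows: the datastore driver reads the DS_DRIVER_ACTION_DATA XML (VM, Image
and Datastore) from standard input and prints a JSON map of DISK_ID to restore
URL, which the TM `restore` scripts parse before downloading each disk. A
minimal parsing sketch, reusing the illustrative URL from the rsync driver:

    require 'json'

    # Example `ls` output: DISK_ID => restore URL (rsync URL format)
    ls_output = '{"0":"rsync://102//0:0e6658/var/lib/one/datastores/102/21/0e6658/disk.0.0"}'

    JSON.parse(ls_output).each do |disk_id, url|
        puts "disk #{disk_id} -> #{url}"
    end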

TODO:
  - Remove any existing VM snapshot (system/disk) in the VM. Note that
    snapshots are not included in a backup.

  - Ceph drivers

  - Java and Go API bindings

  - Sunstone interface: new state and new operation. Review the new state
    in Sunstone.

Co-authored-by: Pavel Czerny <pczerny@opennebula.io>
Ruben S. Montero 2024-04-26 11:33:49 +02:00
parent 669cc6db5a
commit 3dfd53df1c
32 changed files with 1037 additions and 60 deletions

View File

@ -513,6 +513,21 @@ public:
*/
int backup_cancel(int vid, const RequestAttributes& ra, std::string& error_str);
/**
* Restore VM from backup
*
* @param vid the VM id
* @param img_id the ID of the backup Image
* @param inc_id the ID of the increment to restore
* @param disk_id the ID of the disk (-1 for all)
* @param ra information about the API call request
* @param error_str Error reason, if any
*
* @return 0 on success, -1 otherwise
*/
int restore(int vid, int img_id, int inc_id, int disk_id,
const RequestAttributes& ra, std::string& error_str);
/**
* Resize cpu and memory
*

View File

@ -148,6 +148,9 @@ public:
void trigger_resize_success(int vid);
void trigger_resize_failure(int vid);
void trigger_disk_restore_success(int vid);
void trigger_disk_restore_failure(int vid);
void trigger_backup_success(int vid);
void trigger_backup_failure(int vid);
// -------------------------------------------------------------------------

View File

@ -745,4 +745,25 @@ protected:
void request_execute(xmlrpc_c::paramList const& pl,
RequestAttributes& ra) override;
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
class VirtualMachineRestore : public RequestManagerVirtualMachine
{
public:
VirtualMachineRestore():
RequestManagerVirtualMachine("one.vm.restore",
"Restore VM disks from backup Image",
"A:siiii")
{
vm_action = VMActions::RESTORE_ACTION;
auth_op = AuthRequest::ADMIN;
}
protected:
void request_execute(xmlrpc_c::paramList const& pl,
RequestAttributes& ra) override;
};
#endif

View File

@ -351,6 +351,11 @@ public:
* This function resizes a VM disk
*/
void trigger_resize(int vid);
/**
* This function restores VM disks from a backup
*/
void trigger_prolog_restore(int vid, int img_id, int inc_id, int disk_id);
};
#endif /*TRANSFER_MANAGER_H*/

View File

@ -87,7 +87,8 @@ public:
SG_ATTACH_ACTION = 56, // "one.vm.attachsg"
SG_DETACH_ACTION = 57, // "one.vm.detachsg"
PCI_ATTACH_ACTION = 58, // "one.vm.attachpci"
PCI_DETACH_ACTION = 59 // "one.vm.detachpci"
PCI_DETACH_ACTION = 59, // "one.vm.detachpci"
RESTORE_ACTION = 60 // "one.vm.restore"
};
static std::string action_to_str(Action action);

View File

@ -141,7 +141,8 @@ public:
HOTPLUG_SAVEAS_UNDEPLOYED = 67,
HOTPLUG_SAVEAS_STOPPED = 68,
BACKUP = 69,
BACKUP_POWEROFF = 70
BACKUP_POWEROFF = 70,
PROLOG_RESTORE = 71
};
static const int MAX_VNC_PASSWD_LENGTH = 8;

View File

@ -1984,7 +1984,8 @@ TM_SHARED_FILES="src/tm_mad/shared/clone \
src/tm_mad/shared/prebackup_live \
src/tm_mad/shared/prebackup \
src/tm_mad/shared/postbackup_live \
src/tm_mad/shared/postbackup"
src/tm_mad/shared/postbackup \
src/tm_mad/shared/restore"
TM_QCOW2_FILES="${TM_SHARED_FILES}"
@ -2064,7 +2065,8 @@ TM_SSH_FILES="src/tm_mad/ssh/clone \
src/tm_mad/ssh/prebackup_live \
src/tm_mad/ssh/prebackup \
src/tm_mad/ssh/postbackup_live \
src/tm_mad/ssh/postbackup"
src/tm_mad/ssh/postbackup \
src/tm_mad/ssh/restore"
TM_SSH_ETC_FILES="src/tm_mad/ssh/sshrc"
@ -2085,7 +2087,8 @@ TM_DUMMY_FILES="src/tm_mad/dummy/clone \
src/tm_mad/dummy/snap_revert \
src/tm_mad/dummy/monitor \
src/tm_mad/dummy/cpds \
src/tm_mad/dummy/resize"
src/tm_mad/dummy/resize \
src/tm_mad/dummy/prolog_restore"
TM_CEPH_FILES="src/tm_mad/ceph/clone \
src/tm_mad/ceph/clone.ssh \
@ -2265,7 +2268,8 @@ DATASTORE_DRIVER_RSYNC_SCRIPTS="src/datastore_mad/remotes/rsync/cp \
src/datastore_mad/remotes/rsync/backup_cancel \
src/datastore_mad/remotes/rsync/restore \
src/datastore_mad/remotes/rsync/export \
src/datastore_mad/remotes/rsync/increment_flatten"
src/datastore_mad/remotes/rsync/increment_flatten \
src/datastore_mad/remotes/rsync/ls"
DATASTORE_DRIVER_ETC_SCRIPTS="src/datastore_mad/remotes/datastore.conf"

View File

@ -814,7 +814,8 @@ DEFAULT_UMASK = 177
# - sg-attach, includes attach and detach actions
#******************************************************************************
VM_ADMIN_OPERATIONS = "migrate, delete, recover, retry, deploy, resched, backup"
VM_ADMIN_OPERATIONS = "migrate, delete, recover, retry, deploy, resched,
backup, restore"
VM_MANAGE_OPERATIONS = "undeploy, hold, release, stop, suspend, resume, reboot,
poweroff, disk-attach, nic-attach, disk-snapshot, terminate, disk-resize,

View File

@ -257,6 +257,21 @@ CommandParser::CmdParser.new(ARGV) do
:description => 'Creates a new backup image, from a new full backup (only for incremental)'
}
INCREMENT = {
:name => 'increment',
:large => '--increment increment_id',
:format => Integer,
:description => 'Use the given increment ID to restore the backup.'\
' If not provided, the last one will be used'
}
DISK_ID = {
:name => 'disk_id',
:large => '--disk-id disk_id',
:format => Integer,
:description => 'Use only selected disk ID'
}
OpenNebulaHelper::TEMPLATE_OPTIONS_VM.delete_if do |v|
['as_gid', 'as_uid'].include?(v[:name])
end
@ -281,6 +296,10 @@ CommandParser::CmdParser.new(ARGV) do
OpenNebulaHelper.rname_to_id(arg, 'USER')
end
set :format, :imageid, OpenNebulaHelper.rname_to_id_desc('IMAGE') do |arg|
OpenNebulaHelper.rname_to_id(arg, 'IMAGE')
end
set :format,
:datastoreid,
OpenNebulaHelper.rname_to_id_desc('DATASTORE') do |arg|
@ -1771,6 +1790,27 @@ CommandParser::CmdParser.new(ARGV) do
end
end
restore_desc = <<-EOT.unindent
Restore the Virtual Machine from the backup Image. The VM must be in poweroff state.
EOT
command :restore, restore_desc, :vmid, :imageid, :options => [DISK_ID, INCREMENT] do
helper.perform_action(args[0], options, 'Restoring VM from backup') do |vm|
disk_id = options[:disk_id]
disk_id ||= -1 # All disks by default
inc_id = options[:increment]
inc_id ||= -1 # Last increment by default
rc = vm.restore(args[1], inc_id, disk_id)
if OpenNebula.is_error?(rc)
STDERR.puts "Error restoring: #{rc.message}"
exit(-1)
end
end
end
# Deprecated commands, remove these commands in release 8.x
deprecated_command(:'delete-chart', 'sched-delete')

View File

@ -0,0 +1,145 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
VAR_LOCATION = '/var/lib/one'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
VAR_LOCATION = ONE_LOCATION + '/var'
end
SERVERADMIN_AUTH = VAR_LOCATION + '/.one/onegate_auth'
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
require 'opennebula'
require 'pathname'
require 'rexml/document'
require 'getoptlong'
require_relative '../../tm/lib/backup'
require_relative '../../tm/lib/tm_action'
opts = GetoptLong.new(
['--increment', '-i', GetoptLong::REQUIRED_ARGUMENT]
)
begin
# --------------------------------------------------------------------------
# Parse input parameters
# --------------------------------------------------------------------------
increment_id = -1
opts.each do |opt, arg|
case opt
when '--increment'
increment_id = arg.to_i
end
end
action = STDIN.read
# --------------------------------------------------------------------------
# Image and Datastore attributes
# --------------------------------------------------------------------------
image = TransferManager::BackupImage.new(action)
chain = if increment_id == -1
image.chain
else
image.chain_up_to(increment_id)
end
xml = REXML::Document.new(action).root
ds_id = xml.elements['DATASTORE/ID'].text.to_i
bpath = xml.elements['DATASTORE/BASE_PATH'].text
ruser = xml.elements['DATASTORE/TEMPLATE/RSYNC_USER']&.text || 'oneadmin'
rhost = xml.elements['DATASTORE/TEMPLATE/RSYNC_HOST'].text
snap = image.selected || image.last
burl = "rsync://#{ds_id}/#{image.bj_id}/#{chain}"
# --------------------------------------------------------------------------
# Get a list of disk paths stored in the backup
# --------------------------------------------------------------------------
script = [<<~EOS]
set -e -o pipefail; shopt -qs failglob
EOS
snap_dir = %(#{bpath}/#{image.vm_id}/#{snap}/)
snap_dir = Pathname.new(snap_dir).cleanpath.to_s
script << %(find '#{snap_dir}' -type f -name 'disk.*')
rc = TransferManager::Action.ssh 'list_files',
:host => "#{ruser}@#{rhost}",
:forward => true,
:cmds => script.join("\n"),
:nostdout => false,
:nostderr => false
raise StandardError, "Error listing backups: #{rc.stderr}" if rc.code != 0
disk_paths = rc.stdout.lines.map(&:strip).reject(&:empty?)
rescue StandardError => e
STDERR.puts e.full_message
exit(-1)
end
# ------------------------------------------------------------------------------
# Output a JSON with the disk PATHS (for downloader):
# {"0":"rsync://102//0:0e6658/var/lib/one/datastores/102/21/0e6658/disk.0.0"}
# ------------------------------------------------------------------------------
disks = {}
disk_paths.each do |f|
m = f.match(/disk\.([0-9]+)/)
next unless m
disks[m[1]] = "#{burl}#{f}"
end
puts disks.to_json

View File

@ -2867,6 +2867,43 @@ int DispatchManager::backup_cancel(int vid,
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int DispatchManager::restore(int vid, int img_id, int inc_id, int disk_id,
const RequestAttributes& ra, std::string& error_str)
{
ostringstream oss;
auto vm = vmpool->get(vid);
if ( vm == nullptr )
{
error_str ="Could not restore VM, it does not exist";
return -1;
}
if (vm->get_state() != VirtualMachine::POWEROFF)
{
error_str ="Could not restore VM, it must be in poweroff";
return -1;
}
vm->set_state(VirtualMachine::ACTIVE);
vm->set_state(VirtualMachine::PROLOG_RESTORE);
// Call driver action to copy disk from image backup to VM system disk
tm->trigger_prolog_restore(vm->get_oid(), img_id, inc_id, disk_id);
vm->set_vm_info();
close_cp_history(vmpool, vm.get(), VMActions::RESTORE_ACTION, ra);
vmpool->update(vm.get());
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
static int test_set_capacity(VirtualMachine * vm, float cpu, long mem, int vcpu,
string& error)
{

View File

@ -1204,6 +1204,11 @@ void LifeCycleManager::clean_up_vm(VirtualMachine * vm, bool dispose,
tm->trigger_epilog_delete(vm);
break;
case VirtualMachine::PROLOG_RESTORE:
tm->trigger_driver_cancel(vid);
tm->trigger_epilog_delete(vm);
break;
case VirtualMachine::MIGRATE:
vm->set_running_etime(the_time);
@ -1563,6 +1568,17 @@ void LifeCycleManager::recover(VirtualMachine * vm, bool success,
trigger_disk_resize_failure(vim);
}
break;
case VirtualMachine::PROLOG_RESTORE:
if (success)
{
trigger_disk_restore_success(vim);
}
else
{
trigger_disk_restore_failure(vim);
}
break;
}
}
@ -1785,6 +1801,7 @@ void LifeCycleManager::retry(VirtualMachine * vm)
case VirtualMachine::UNKNOWN:
case VirtualMachine::BACKUP:
case VirtualMachine::BACKUP_POWEROFF:
case VirtualMachine::PROLOG_RESTORE:
break;
}
@ -1902,6 +1919,7 @@ void LifeCycleManager::trigger_updatesg(int sgid)
case VirtualMachine::HOTPLUG_PROLOG_POWEROFF:
case VirtualMachine::HOTPLUG_EPILOG_POWEROFF:
case VirtualMachine::BACKUP_POWEROFF:
case VirtualMachine::PROLOG_RESTORE:
is_tmpl = true;
break;

View File

@ -2432,7 +2432,7 @@ void LifeCycleManager::trigger_disk_resize_failure(int vid)
if ( disk == nullptr )
{
vm->log("LCM", Log::ERROR,
"disk_resize_failure, but the VM doesn't have a disk with resize operation in progress");
"disk_resize_failure, but no resize operation in progress");
return;
}
@ -2682,6 +2682,60 @@ void LifeCycleManager::trigger_resize_failure(int vid)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void LifeCycleManager::trigger_disk_restore_success(int vid)
{
trigger([this, vid] {
if ( auto vm = vmpool->get(vid) )
{
VirtualMachine::LcmState state = vm->get_lcm_state();
if (state == VirtualMachine::PROLOG_RESTORE)
{
// todo: Clear VM and disk snapshots
vm->set_state(VirtualMachine::POWEROFF);
vm->log("LCM", Log::INFO, "VM restore operation completed.");
}
else
{
vm->log("LCM", Log::ERROR, "VM restore success, VM in a wrong state");
return;
}
vmpool->update(vm.get());
}
});
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void LifeCycleManager::trigger_disk_restore_failure(int vid)
{
trigger([this, vid] {
if ( auto vm = vmpool->get(vid) )
{
VirtualMachine::LcmState lcm_state = vm->get_lcm_state();
if (lcm_state == VirtualMachine::PROLOG_RESTORE)
{
vm->set_state(VirtualMachine::POWEROFF);
vm->log("LCM", Log::INFO, "VM restore operation fails");
}
else
{
vm->log("LCM", Log::ERROR,
"restore fails, VM in a wrong state: " + vm->state_str());
return;
}
vmpool->update(vm.get());
}
});
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
static int create_backup_image(VirtualMachine * vm, string& msg)
{
Nebula& nd = Nebula::instance();

View File

@ -63,7 +63,8 @@ module OpenNebula
:updatenic => 'vm.updatenic',
:backupcancel => 'vm.backupcancel',
:attachpci => 'vm.attachpci',
:detachpci => 'vm.detachpci'
:detachpci => 'vm.detachpci',
:restore => 'vm.restore'
}
VM_STATE=['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', 'FAILED',
@ -140,7 +141,8 @@ module OpenNebula
'HOTPLUG_SAVEAS_UNDEPLOYED',
'HOTPLUG_SAVEAS_STOPPED',
'BACKUP',
'BACKUP_POWEROFF'
'BACKUP_POWEROFF',
'RESTORE'
]
SHORT_VM_STATES={
@ -227,7 +229,8 @@ module OpenNebula
'HOTPLUG_SAVEAS_UNDEPLOYED' => 'hotp',
'HOTPLUG_SAVEAS_STOPPED' => 'hotp',
'BACKUP' => 'back',
'BACKUP_POWEROFF' => 'back'
'BACKUP_POWEROFF' => 'back',
'RESTORE' => 'rest'
}
HISTORY_ACTION=[
@ -290,7 +293,8 @@ module OpenNebula
'sg-attach',
'sg-detach',
'pci-attach',
'pci-detach'
'pci-detach',
'restore'
]
EXTERNAL_IP_ATTRS = [
@ -891,6 +895,14 @@ module OpenNebula
call(VM_METHODS[:detachpci], @pe_id, pci_id)
end
# Restore the VM from backup Image
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise.
def restore(img_id, inc_id, disk_id)
@client.call(VM_METHODS[:restore], @pe_id, img_id, inc_id, disk_id)
end
########################################################################
# Helpers to get VirtualMachine information
########################################################################

View File

@ -352,6 +352,7 @@ void RequestManager::register_xml_methods()
xmlrpc_c::methodPtr vm_backupcancel(new VirtualMachineBackupCancel());
xmlrpc_c::methodPtr vm_attachpci(new VirtualMachineAttachPCI());
xmlrpc_c::methodPtr vm_detachpci(new VirtualMachineDetachPCI());
xmlrpc_c::methodPtr vm_restore(new VirtualMachineRestore());
xmlrpc_c::methodPtr vm_pool_acct(new VirtualMachinePoolAccounting());
xmlrpc_c::methodPtr vm_pool_monitoring(new VirtualMachinePoolMonitoring());
@ -619,6 +620,7 @@ void RequestManager::register_xml_methods()
RequestManagerRegistry.addMethod("one.vm.backupcancel", vm_backupcancel);
RequestManagerRegistry.addMethod("one.vm.attachpci", vm_attachpci);
RequestManagerRegistry.addMethod("one.vm.detachpci", vm_detachpci);
RequestManagerRegistry.addMethod("one.vm.restore", vm_restore);
RequestManagerRegistry.addMethod("one.vmpool.info", vm_pool_info);
RequestManagerRegistry.addMethod("one.vmpool.infoextended", vm_pool_info_extended);

View File

@ -4202,6 +4202,95 @@ void VirtualMachineDetachPCI::request_execute(
{
success_response(id, att);
}
}
// -----------------------------------------------------------------------------
void VirtualMachineRestore::request_execute(
xmlrpc_c::paramList const& paramList, RequestAttributes& att)
{
// Get request parameters
int vm_id = paramList.getInt(1);
int img_id = paramList.getInt(2);
int inc_id = paramList.getInt(3);
int disk_id = paramList.getInt(4);
Nebula& nd = Nebula::instance();
ImagePool *ipool = nd.get_ipool();
// Authorize request
PoolObjectAuth vm_perms, img_perms;
if (auto vm = get_vm_ro(vm_id, att))
{
vm->get_permissions(vm_perms);
if (disk_id != -1 && !vm->get_disk(disk_id))
{
att.resp_msg = "VM disk does not exist";
failure_response(ACTION, att);
return;
}
}
else
{
att.resp_obj = PoolObjectSQL::VM;
att.resp_id = vm_id;
failure_response(NO_EXISTS, att);
return;
}
if (auto img = ipool->get_ro(img_id))
{
if (img->get_type() != Image::BACKUP)
{
att.resp_msg = "Image has to be of type BACKUP";
failure_response(ACTION, att);
return;
}
if (inc_id > img->last_increment_id())
{
att.resp_msg = "Wrong increment";
failure_response(ACTION, att);
return;
}
img->get_permissions(img_perms);
}
else
{
att.resp_obj = PoolObjectSQL::IMAGE;
att.resp_id = img_id;
failure_response(NO_EXISTS, att);
return;
}
AuthRequest ar(att.uid, att.group_ids);
ar.add_auth(att.auth_op, vm_perms);
ar.add_auth(AuthRequest::USE, img_perms);
if (UserPool::authorize(ar) == -1)
{
att.resp_msg = ar.message;
failure_response(AUTHORIZATION, att);
return;
}
if (dm->restore(vm_id, img_id, inc_id, disk_id, att, att.resp_msg) != 0)
{
failure_response(ACTION, att);
return;
}
success_response(vm_id, att);
return;
}

View File

@ -145,7 +145,8 @@ define(function(require) {
"HOTPLUG_SAVEAS_UNDEPLOYED",
"HOTPLUG_SAVEAS_STOPPED",
"BACKUP",
"BACKUP_POWEROFF"
"BACKUP_POWEROFF",
"RESTORE"
];
var LCM_STATES_CLASSES = [
@ -220,6 +221,7 @@ define(function(require) {
"info", // HOTPLUG_SAVEAS_STOPPED
"info", // BACKUP
"info", // BACKUP_POWEROFF
"info", // RESTORE_POWEROFF
];
var LCM_STATES = {
@ -294,6 +296,7 @@ define(function(require) {
HOTPLUG_SAVEAS_STOPPED : 68,
BACKUP : 69,
BACKUP_POWEROFF : 70,
RESTORE : 71,
};
var SHORT_LCM_STATES_STR = [
@ -368,6 +371,7 @@ define(function(require) {
Locale.tr("HOTPLUG"), // HOTPLUG_SAVEAS_STOPPED
Locale.tr("BACKUP"), // BACKUP
Locale.tr("BACKUP"), // BACKUP_POWEROFF
Locale.tr("RESTORE"), // RESTORE_POWEROFF
];
var VNC_STATES = [

View File

@ -129,6 +129,7 @@ define(function(require) {
LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.HOTPLUG_SAVEAS_STOPPED ] = [];
LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.BACKUP ] = [];
LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.BACKUP_POWEROFF ] = [];
LCM_STATE_ACTIONS[ OpenNebulaVM.LCM_STATES.RESTORE ] = [];
return {
'disableAllStateActions': disableAllStateActions,

View File

@ -579,7 +579,7 @@ void OpenNebulaTemplate::set_conf_default()
#*******************************************************************************
*/
set_conf_single("VM_ADMIN_OPERATIONS", "migrate, delete, recover, retry, "
"deploy, resched");
"deploy, resched, backup, restore");
set_conf_single("VM_MANAGE_OPERATIONS", "undeploy, hold, release, stop, "
"suspend, resume, reboot, poweroff, disk-attach, nic-attach, "

View File

@ -22,6 +22,7 @@
#include "VirtualMachineDisk.h"
#include "VirtualMachinePool.h"
#include "LifeCycleManager.h"
#include "ImagePool.h"
using namespace std;
@ -933,7 +934,7 @@ void TransferManager::epilog_transfer_command(
{
string save = disk->vector_value("SAVE");
int disk_id = disk->get_disk_id();
string tm_mad_system;
string tm_mad_system;
if ( one_util::toupper(save) == "YES" )
{
@ -2235,6 +2236,88 @@ void TransferManager::trigger_resize(int vid)
});
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void TransferManager::trigger_prolog_restore(int vid, int img_id, int inc_id,
int disk_id)
{
trigger([this, vid, img_id, inc_id, disk_id]{
ostringstream oss;
ofstream xfr;
string xfr_name;
auto tm_md = get();
Nebula& nd = Nebula::instance();
unique_ptr<VirtualMachine> vm;
if (tm_md == nullptr)
{
goto error_driver;
}
vm = vmpool->get(vid);
if (!vm)
{
return;
}
if (!vm->hasHistory())
{
goto error_history;
}
xfr_name = vm->get_transfer_file() + ".prolog_restore";
xfr.open(xfr_name.c_str(), ios::out | ios::trunc);
if (xfr.fail() == true)
{
goto error_file;
}
//RESTORE tm_mad host:remote_dir vm_id img_id inc_id disk_id
xfr << "RESTORE" << " "
<< vm->get_tm_mad() << " "
<< vm->get_hostname() << ":" << vm->get_system_dir() << " "
<< vid << " "
<< img_id << " "
<< inc_id << " "
<< disk_id << " "
<< endl;
xfr.close();
{
transfer_msg_t msg(TransferManagerMessages::TRANSFER, "", vid, xfr_name);
tm_md->write(msg);
}
return;
error_driver:
oss << "prolog_restore, error getting TM driver.";
goto error_common;
error_history:
oss << "prolog_restore, the VM has no history";
goto error_common;
error_file:
oss << "prolog_restore, could not open file: " << xfr_name;
goto error_common;
error_common:
if (vm)
{
vm->log("TrM", Log::ERROR, oss);
}
nd.get_lcm()->trigger_disk_restore_failure(vid);
return;
});
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -114,6 +114,10 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
lcm->trigger_disk_resize_success(id);
break;
case VirtualMachine::PROLOG_RESTORE:
lcm->trigger_disk_restore_success(id);
break;
default:
goto error_state;
}
@ -184,6 +188,10 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
lcm->trigger_disk_resize_failure(id);
break;
case VirtualMachine::PROLOG_RESTORE:
lcm->trigger_disk_restore_failure(id);
break;
default:
goto error_state;
}

src/tm_mad/ceph/restore Symbolic link (1 line)
View File

@ -0,0 +1 @@
../common/not_supported.sh

src/tm_mad/dummy/restore Symbolic link (1 line)
View File

@ -0,0 +1 @@
../common/dummy.sh

src/tm_mad/fs_lvm/restore Symbolic link (1 line)
View File

@ -0,0 +1 @@
../common/not_supported.sh

View File

@ -0,0 +1 @@
../common/not_supported.sh

View File

@ -26,6 +26,8 @@ module TransferManager
# lets you get datastore attributes
class Datastore
attr_reader :ds, :mad
# Constants for wrapper commands
SYSTEMD_RUN = 'systemd-run --user --quiet --pipe --collect --wait'
IONICE = 'ionice'

View File

@ -32,7 +32,7 @@ module TransferManager
# "ubuntu2004-6-4-4643-1.test:/var/lib/one//datastores/0/2/disk.0"
class Action
attr_reader :logger, :vm
attr_reader :logger, :vm, :one
def initialize(options = {})
@options={
@ -164,6 +164,10 @@ module TransferManager
@vm['/VM/HISTORY_RECORDS/HISTORY[last()]/VM_MAD']
end
def persistent?(disk_id)
@vm["/VM/TEMPLATE/DISK [ DISK_ID = #{disk_id} ]/SAVE"].casecmp('YES') == 0
end
def kvm?
'kvm'.casecmp(vm_mad) == 0
end

View File

@ -52,7 +52,6 @@ end
$LOAD_PATH << RUBY_LIB_LOCATION
require 'pp'
require 'shellwords'
require 'OpenNebulaDriver'
require 'CommandManager'
@ -67,7 +66,7 @@ class TransferManagerDriver < OpenNebulaDriver
# Register TRANSFER action, and tm drivers available
# @param tm_type [Array] of tm types
# @param options [Hash] basic options for an OpenNebula driver
def initialize(tm_type, options={})
def initialize(tm_type, options = {})
@options={
:concurrency => 15,
:threaded => true,
@ -76,7 +75,7 @@ class TransferManagerDriver < OpenNebulaDriver
super('tm/', @options)
if tm_type == nil
if tm_type.nil?
@types = Dir["#{@local_scripts_path}/*/"].map do |d|
d.split('/')[-1]
end
@ -86,8 +85,7 @@ class TransferManagerDriver < OpenNebulaDriver
@types = tm_type
end
# register actions
register_action(:TRANSFER, method("action_transfer"))
register_action(:TRANSFER, method('action_transfer'))
end
# Driver Action: TRANSFER id script_file
@ -95,23 +93,19 @@ class TransferManagerDriver < OpenNebulaDriver
def action_transfer(id, script_file)
script = parse_script(script_file)
if script.nil?
send_message("TRANSFER",
RESULT[:failure],
id,
"Transfer file '#{script_file}' does not exist")
return
return send_message('TRANSFER', RESULT[:failure], id,
"Transfer file '#{script_file}' does not exist")
end
script.each { |command|
script.each do |command|
result, info = do_transfer_action(id, command)
if result == RESULT[:failure]
send_message("TRANSFER", result, id, info)
return
return send_message('TRANSFER', result, id, info)
end
}
end
send_message("TRANSFER", RESULT[:success], id)
send_message('TRANSFER', RESULT[:success], id)
end
# Executes a single transfer action (command), as returned by the parse
@ -119,32 +113,36 @@ class TransferManagerDriver < OpenNebulaDriver
# @param id [String] with the OpenNebula ID for the TRANSFER action
# @param command [Array]
# @param stdin [String]
def do_transfer_action(id, command, stdin=nil)
def do_transfer_action(id, command, stdin = nil)
cmd = command[0].downcase
tm = command[1]
args = command[2..-1].map{|e| Shellwords.escape(e)}.join(" ")
args = command[2..-1].map {|e| Shellwords.escape(e) }.join(' ')
if not @types.include?(tm)
if !@types.include?(tm)
return RESULT[:failure], "Transfer Driver '#{tm}' not available"
end
path = File.join(@local_scripts_path, tm, cmd)
if !File.exist?(path)
md = cmd.match(/(.*)\.(.*)/)
if md && md[1]
path_shortened = File.join(@local_scripts_path, tm, md[1])
if !File.exist?(path_shortened)
return RESULT[:failure],
"Driver path '#{path}' nor '#{path_shortened}' exists"
end
path = path_shortened
else
md = cmd.match(/(.*)\.(.*)/)
if !md || !md[1]
return RESULT[:failure], "Driver path '#{path}' does not exists"
end
path_shortened = File.join(@local_scripts_path, tm, md[1])
if !File.exist?(path_shortened)
return RESULT[:failure],
"Driver path '#{path}' nor '#{path_shortened}' exists"
end
path = path_shortened
end
path << " " << args
path << ' ' << args
rc = LocalCommand.run(path, log_method(id), stdin)
result, info = get_info_from_execution(rc)
@ -159,19 +157,19 @@ class TransferManagerDriver < OpenNebulaDriver
# @return lines [Array] with the commands of the script. Each command is an
# array itself.
def parse_script(sfile)
return nil if !File.exist?(sfile)
return unless File.exist?(sfile)
stext = File.read(sfile)
lines = Array.new
lines = []
stext.each_line {|line|
stext.each_line do |line|
next if line.match(/^\s*#/) # skip if the line is commented
next if line.match(/^\s*$/) # skip if the line is empty
command = line.split(" ")
command = line.split(' ')
lines << command
}
end
return lines
end
@ -183,12 +181,11 @@ end
# TransferManager Driver Main program
################################################################################
################################################################################
if __FILE__ == $0
opts = GetoptLong.new(
[ '--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--tm-types', '-d', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--timeout', '-w', GetoptLong::OPTIONAL_ARGUMENT ]
['--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT],
['--tm-types', '-d', GetoptLong::OPTIONAL_ARGUMENT],
['--timeout', '-w', GetoptLong::OPTIONAL_ARGUMENT]
)
tm_type = nil
@ -198,15 +195,15 @@ if __FILE__ == $0
begin
opts.each do |opt, arg|
case opt
when '--threads'
threads = arg.to_i
when '--tm-types'
tm_type = arg.split(',').map {|a| a.strip }
when '--timeout'
timeout = arg
when '--threads'
threads = arg.to_i
when '--tm-types'
tm_type = arg.split(',').map {|a| a.strip }
when '--timeout'
timeout = arg
end
end
rescue Exception => e
rescue StandardError
exit(-1)
end

src/tm_mad/qcow2/restore Executable file (248 lines)
View File

@ -0,0 +1,248 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
end
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
require_relative '../lib/tm_action'
require_relative '../lib/datastore'
require 'rexml/document'
require 'json'
# Replace a non-persistent VM disk with a backup image
# :rhost host where the VM is running
# :rdir VM folder
# :id disk id
# :path path to the disk image
def replace_np(opts = {})
snap_path = "#{opts[:rdir]}/disk.#{opts[:id]}.snap"
disk_path = "#{opts[:rdir]}/disk.#{opts[:id]}"
_script = <<~EOS
set -e -o pipefail
# Clean existing snapshots
[ -d #{snap_path} ] && rm -rf #{snap_path}
mkdir -p #{snap_path}
mv #{opts[:path]} #{snap_path}/0
rm -f #{disk_path}
ln -sf disk.#{opts[:id]}.snap/0 #{disk_path}
cd #{snap_path}
ln -sf . disk.#{opts[:id]}.snap
EOS
end
def replace_p(opts = {})
image_snap = "#{opts[:source]}.snap"
disk_path = "#{opts[:rdir]}/disk.#{opts[:id]}"
_script = <<~EOS
set -e -o pipefail
# Replace existing disk in image datastore
[ -d #{image_snap} ] && rm -rf #{image_snap}
mkdir -p #{image_snap}
mv #{opts[:path]} #{opts[:source]}
ln -sf #{opts[:source]} #{image_snap}/0
cd #{image_snap}
ln -sf . `basename #{image_snap}`
# VM disk symlink to the image
rm #{disk_path}
ln -sf #{image_snap}/0 #{disk_path}
[ -d #{disk_path}.snap ] && rm -rf #{disk_path}.snap
exit 0
EOS
end
#-------------------------------------------------------------------------------
# RESTORE vm_id img_id inc_id disk_id
#-------------------------------------------------------------------------------
dir = ARGV[0].split ':'
vm_id = ARGV[1]
img_id = ARGV[2]
inc_id = ARGV[3]
disk_id = ARGV[4].to_i
rhost = dir[0]
rdir = dir[1]
begin
action = TransferManager::Action.new(:action_name => 'restore',
:vm_id => vm_id)
# --------------------------------------------------------------------------
# Image information
# --------------------------------------------------------------------------
image = OpenNebula::Image.new_with_id(img_id.to_i, action.one)
rc = image.info
raise rc.message.to_s if OpenNebula.is_error?(rc)
# --------------------------------------------------------------------------
# Datastore information
# --------------------------------------------------------------------------
ds_id = image['/IMAGE/DATASTORE_ID'].to_i
ds = OpenNebula::Datastore.new_with_id(ds_id, action.one)
rc = ds.info true
raise rc.message.to_s if OpenNebula.is_error?(rc)
ds_cmd = "#{__dir__}/../../datastore/#{ds['/DATASTORE/DS_MAD'].downcase}/ls"
# --------------------------------------------------------------------------
# Backup information
# --------------------------------------------------------------------------
driver_action = <<~EOS
<DS_DRIVER_ACTION_DATA>
#{action.vm.to_xml}
#{ds.to_xml}
#{image.to_xml}
</DS_DRIVER_ACTION_DATA>
EOS
rc = action.ssh(:host => nil,
:cmds => "echo '#{driver_action}' | #{ds_cmd} -i #{inc_id}",
:forward => false,
:nostdout => false,
:nostderr => false)
raise 'cannot list backup contents' unless rc.code == 0
disks = JSON.parse(rc.stdout)
# --------------------------------------------------------------------------
# Restore disks in Host VM folder
# --------------------------------------------------------------------------
dpaths = {}
disks.each do |id, url|
next if disk_id != -1 && id.to_i != disk_id
if action.persistent? id
isource = action.vm["/VM/TEMPLATE/DISK [ DISK_ID = #{id} ]/SOURCE"]
dst = "#{isource}.backup"
dpaths[id] = {
:persistent => true,
:path => dst,
:source => isource
}
else
dst = "#{rdir}/disk.#{id}.backup"
dpaths[id] = {
:persistent => false,
:path => dst,
:source => nil
}
end
download = <<~EOS
#{__dir__}/../../datastore/downloader.sh --nodecomp #{url} - | \
ssh #{rhost} dd of=#{dst} bs=64k conv=sparse
EOS
rc = action.ssh(:host => nil,
:cmds => download,
:forward => false,
:nostdout => false,
:nostderr => false)
# TODO: cleanup dpaths
raise 'cannot download backup disk' unless rc.code == 0
end
raise "disk #{disk_id} not found" if dpaths.empty?
# --------------------------------------------------------------------------
# Replace VM disks with backup copies (~prolog)
# --------------------------------------------------------------------------
dpaths.each do |id, rdisk|
opts = {
:rhost => rhost,
:rdir => rdir,
:id => id,
:path => rdisk[:path],
:source => rdisk[:source]
}
script = if rdisk[:persistent]
replace_p(opts)
else
replace_np(opts)
end
rc = action.ssh(:host => rhost,
:cmds => script,
:forward => false,
:nostdout => false,
:nostderr => false)
raise 'cannot copy disk backup' unless rc.code == 0
end
rescue StandardError => e
STDERR.puts "Error restoring VM disks: #{e.message}"
exit(1)
end

src/tm_mad/ssh/restore Executable file (163 lines)
View File

@ -0,0 +1,163 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
end
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
require_relative '../lib/tm_action'
require_relative '../lib/datastore'
require 'rexml/document'
require 'json'
#-------------------------------------------------------------------------------
# RESTORE vm_id img_id inc_id disk_id
#-------------------------------------------------------------------------------
dir = ARGV[0].split ':'
vm_id = ARGV[1]
img_id = ARGV[2]
inc_id = ARGV[3]
disk_id = ARGV[4].to_i
rhost = dir[0]
rdir = dir[1]
begin
action = TransferManager::Action.new(:action_name => 'restore',
:vm_id => vm_id)
# --------------------------------------------------------------------------
# Image information
# --------------------------------------------------------------------------
image = OpenNebula::Image.new_with_id(img_id.to_i, action.one)
rc = image.info
raise rc.message.to_s if OpenNebula.is_error?(rc)
# --------------------------------------------------------------------------
# Datastore information
# --------------------------------------------------------------------------
ds_id = image['/IMAGE/DATASTORE_ID'].to_i
ds = OpenNebula::Datastore.new_with_id(ds_id, action.one)
rc = ds.info true
raise rc.message.to_s if OpenNebula.is_error?(rc)
ds_cmd = "#{__dir__}/../../datastore/#{ds['/DATASTORE/DS_MAD'].downcase}/ls"
# --------------------------------------------------------------------------
# Backup information
# --------------------------------------------------------------------------
driver_action = <<~EOS
<DS_DRIVER_ACTION_DATA>
#{action.vm.to_xml}
#{ds.to_xml}
#{image.to_xml}
</DS_DRIVER_ACTION_DATA>
EOS
rc = action.ssh(:host => nil,
:cmds => "echo '#{driver_action}' | #{ds_cmd} -i #{inc_id}",
:forward => false,
:nostdout => false,
:nostderr => false)
raise 'cannot list backup contents' unless rc.code == 0
disks = JSON.parse(rc.stdout)
# --------------------------------------------------------------------------
# Restore disks in Host VM folder
# --------------------------------------------------------------------------
dpaths = {}
disks.each do |id, url|
next if disk_id != -1 && id.to_i != disk_id
download = <<~EOS
#{__dir__}/../../datastore/downloader.sh --nodecomp #{url} - | \
ssh #{rhost} dd of=#{rdir}/disk.#{id}.backup bs=64k conv=sparse
EOS
rc = action.ssh(:host => nil,
:cmds => download,
:forward => false,
:nostdout => false,
:nostderr => false)
# TODO: cleanup ssh host rm #{rdir}/disk.*.backup if rc.code == 0
raise 'cannot download backup disk' unless rc.code == 0
dpaths[id] = "#{rdir}/disk.#{id}.backup"
end
raise "disk #{disk_id} not found" if dpaths.empty?
# --------------------------------------------------------------------------
# Replace VM disks with backup copies (~prolog)
# --------------------------------------------------------------------------
dpaths.each do |id, path|
copy = <<~EOS
[ -d #{rdir}/disk.#{id}.snap ] && rm -rf #{rdir}/disk.#{id}.snap
mv #{path} #{rdir}/disk.#{id}
EOS
rc = action.ssh(:host => rhost,
:cmds => copy,
:forward => false,
:nostdout => false,
:nostderr => false)
raise 'cannot copy disk backup' unless rc.code == 0
end
rescue StandardError => e
STDERR.puts "Error restoring VM disks: #{e.message}"
exit(1)
end

View File

@ -225,6 +225,10 @@ int VMActions::set_auth_ops(const string& ops_str,
ops_set.set(PCI_ATTACH_ACTION);
ops_set.set(PCI_DETACH_ACTION);
}
else if ( the_op == "restore" )
{
ops_set.set(RESTORE_ACTION);
}
else
{
error = "Unknown vm operation: " + the_op;
@ -412,6 +416,9 @@ string VMActions::action_to_str(Action action)
case PCI_DETACH_ACTION:
st = "pci-detach";
break;
case RESTORE_ACTION:
st = "restore";
break;
case NONE_ACTION:
st = "none";
break;
@ -646,6 +653,10 @@ int VMActions::action_from_str(const string& st, Action& action)
{
action = PCI_DETACH_ACTION;
}
else if ( st == "restore")
{
action = RESTORE_ACTION;
}
else
{
action = NONE_ACTION;

View File

@ -300,6 +300,8 @@ int VirtualMachine::lcm_state_from_str(string& st, LcmState& state)
state = BACKUP;
} else if ( st == "BACKUP_POWEROFF" ) {
state = BACKUP_POWEROFF;
} else if ( st == "PROLOG_RESTORE" ) {
state = PROLOG_RESTORE;
} else {
return -1;
}
@ -447,6 +449,8 @@ string& VirtualMachine::lcm_state_to_str(string& st, LcmState state)
st = "BACKUP"; break;
case BACKUP_POWEROFF:
st = "BACKUP_POWEROFF"; break;
case PROLOG_RESTORE:
st = "PROLOG_RESTORE"; break;
}
return st;