/* -------------------------------------------------------------------------- */
/* Copyright 2002-2023, OpenNebula Project, OpenNebula Systems                */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
# ifndef VIRTUAL_MACHINE_MANAGER_H_
# define VIRTUAL_MACHINE_MANAGER_H_
# include "VirtualMachineManagerDriver.h"
2020-06-29 13:14:00 +03:00
# include "DriverManager.h"
2020-07-24 17:00:59 +03:00
# include "Listener.h"
2008-06-17 20:27:32 +04:00
2020-06-29 13:14:00 +03:00
class DatastorePool ;
class HostPool ;
2021-09-14 16:26:21 +03:00
class VirtualMachine ;
2020-06-29 13:14:00 +03:00
class VirtualMachinePool ;
2008-06-17 20:27:32 +04:00
2017-02-03 16:19:15 +03:00
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
2020-06-29 13:14:00 +03:00
class VirtualMachineManager :
public DriverManager < VirtualMachineManagerDriver > ,
2020-07-24 17:00:59 +03:00
public Listener
2017-02-03 16:19:15 +03:00
{
public :
VirtualMachineManager (
2020-06-29 13:14:00 +03:00
const std : : string & _mads ) ;
2017-02-03 16:19:15 +03:00
2020-06-29 13:14:00 +03:00
~ VirtualMachineManager ( ) = default ;
2017-02-03 16:19:15 +03:00
2008-06-17 20:27:32 +04:00
/**
2012-06-19 17:26:22 +04:00
* This function starts the associated listener thread, and creates a
2008-06-17 20:27:32 +04:00
* new thread for the Virtual Machine Manager . This thread will wait in
* an action loop till it receives ACTION_FINALIZE .
* @ return 0 on success .
*/
int start ( ) ;
/**
* Loads Virtual Machine Manager Mads defined in configuration file
2020-06-29 13:14:00 +03:00
* @ param _mads configuration of drivers
2008-06-17 20:27:32 +04:00
*/
2020-06-29 13:14:00 +03:00
int load_drivers ( const std : : vector < const VectorAttribute * > & _mads ) ;
2012-06-19 17:26:22 +04:00
2015-11-20 17:44:37 +03:00
/**
* Check if action is supported for imported VMs
* @ param mad name of the driver
* @ param action
* @ return True if it is supported
*/
2020-07-02 23:42:10 +03:00
bool is_imported_action_supported ( const std : : string & mad , VMActions : : Action action )
2015-11-20 17:44:37 +03:00
{
const VirtualMachineManagerDriver * vmd = get ( mad ) ;
2020-06-29 13:14:00 +03:00
if ( vmd = = nullptr )
2015-11-20 17:44:37 +03:00
{
return false ;
}
return vmd - > is_imported_action_supported ( action ) ;
}
2016-03-02 01:31:31 +03:00
/**
* Updates firewall rules of a VM
* @ param vm pointer to VM , needs to be locked
* @ param sgid the id of the security group
*
* @ return 0 on success
*/
int updatesg ( VirtualMachine * vm , int sgid ) ;
F #5989: Live update of Virtual Network attributes
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Christian González <cgonzalez@opennebula.io>
* VNET updates trigger a driver action on running VMs with NICs in the
network.
* VNET includes a sets with VM status: updated, outdated, error and
updating. With VMs in each state.
* VNET flags error situations with a new state UPDATE_FAILURE.
* The same procedure is applied when an AR is updated (only VMs in that
AR are updated).
* A new options in the one.vn.recover API call enable to recover or
retry this VM update operations.
* The following attributes can be live-updated per VNET driver:
- PHYDEV (novlan, vlan, ovs driver)
- MTU (vlan, ovs driver)
- VLAN_ID (vlan, ovs driver)
- QINQ_TYPE (ovs driver)
- CVLANS (ovs driver)
- VLAN_TAGGED_ID (ovs driver)
- OUTER_VLAN_ID (ovs driver)
- INBOUND_AVG_BW (SG, ovs driver + KVM)
- INBOUND_PEAK_BW (SG, ovs driver + KVM)
- INBOUND_PEAK_KB (SG, ovs driver + KVM)
- OUTBOUND_AVG_BW (SG, ovs driver + KVM)
- OUTBOUND_PEAK_BW (SG, ovs driver + KVM)
- OUTBOUND_PEAK_KB (SG, ovs driver + KVM)
* New API call one.vm.updatenic, allows to update individual NICs
without the need of detach/attach (only QoS supported).
* Update operations for: 802.1Q, bridge, fw, ovswitch, ovswitch_vxlan
and vxlan network drivers.
* VNET attributes (old values) stored in VNET_UPDATE to allow
implementation of update operations. The attribute is removed after a
successful update.
* Updates to CLI onevnet (--retry option) / onevm (nicupdate command)
* XSD files updated to reflect the new data model
* Ruby and JAVA bindings updated: new VNET state and recover option, new
VM API call.
* Suntone and Fireedge implementation (lease status, recover option, new
states)
TODO: Virtual Functions does not support this functionality
iii
2022-11-16 15:35:29 +03:00
/**
* Updates nic attributes of a VM
* @ param vm pointer to VM , needs to be locked
* @ param vnid the id of the virtual network
*
* @ return 0 on success
*/
int updatenic ( VirtualMachine * vm , int vnid ) ;
2016-05-18 20:48:43 +03:00
/**
* Get keep_snapshots capability from driver
*/
2020-07-02 23:42:10 +03:00
bool is_keep_snapshots ( const std : : string & name )
2016-05-18 20:48:43 +03:00
{
const VirtualMachineManagerDriver * vmd = get ( name ) ;
2020-02-26 19:51:40 +03:00
if ( vmd = = nullptr )
2016-05-18 20:48:43 +03:00
{
return false ;
}
return vmd - > is_keep_snapshots ( ) ;
}
2020-02-26 19:51:40 +03:00
/**
* Get cold_nic_attach behavior for the driver . When true the driver will be
* invoked in cold NIC attach operations
*/
2020-07-02 23:42:10 +03:00
bool is_cold_nic_attach ( const std : : string & name )
2020-02-26 19:51:40 +03:00
{
const VirtualMachineManagerDriver * vmd = get ( name ) ;
if ( vmd = = nullptr )
{
return false ;
}
return vmd - > is_cold_nic_attach ( ) ;
}
2020-11-17 13:24:52 +03:00
/**
* Get live_resize capability from driver
*/
bool is_live_resize ( const std : : string & name )
{
const VirtualMachineManagerDriver * vmd = get ( name ) ;
if ( vmd = = nullptr )
{
return false ;
}
return vmd - > is_live_resize ( ) ;
}
2019-01-31 19:27:55 +03:00
/**
* Returns a pointer to a Virtual Machine Manager driver . The driver is
* searched by its name .
* @ param name the name of the driver
* @ return the VM driver owned by uid with attribute name equal to value
* or 0 in not found
*/
2020-07-05 23:01:32 +03:00
const VirtualMachineManagerDriver * get ( const std : : string & name ) const
2019-01-31 19:27:55 +03:00
{
2020-06-29 13:14:00 +03:00
return DriverManager : : get_driver ( name ) ;
2019-01-31 19:27:55 +03:00
} ;
2020-05-18 03:23:29 +03:00
/**
* Validates raw sections in the Virtual Machine Template for the
* target driver
* @ param template of the virtual machine
* @ param error_str error if any
*
* @ return 0 on success ( valid raw )
*/
2020-07-02 23:42:10 +03:00
int validate_raw ( const Template * vmt , std : : string & error_str ) ;
2020-05-18 03:23:29 +03:00
2021-09-14 16:26:21 +03:00
/**
* Validate if the VM template satisfy all driver conditions
* @ param vmm_mad is the tm_mad for system datastore chosen
*/
int validate_template ( const std : : string & vmm_mad , const VirtualMachine * vm ,
int hid , int cluster_id , std : : string & error ) ;
2008-06-17 20:27:32 +04:00
private :
/**
* Pointer to the Virtual Machine Pool , to access VMs
*/
VirtualMachinePool * vmpool ;
/**
* Pointer to the Host Pool , to access hosts
*/
HostPool * hpool ;
2012-06-19 17:26:22 +04:00
2015-10-29 03:58:35 +03:00
/**
* Pointer to the Datastore Pool
*/
DatastorePool * ds_pool ;
2020-06-29 13:14:00 +03:00
// -------------------------------------------------------------------------
// Protocol implementation, processing messages from driver
// -------------------------------------------------------------------------
2020-07-02 23:42:10 +03:00
static void _undefined ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
2008-06-17 20:27:32 +04:00
/**
2020-06-29 13:14:00 +03:00
*
2008-06-17 20:27:32 +04:00
*/
2020-07-02 23:42:10 +03:00
void _deploy ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _shutdown ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _reset ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _reboot ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _cancel ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _cleanup ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _checkpoint ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _save ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _restore ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _migrate ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _attachdisk ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _detachdisk ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _attachnic ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _detachnic ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _snapshotcreate ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _snapshotrevert ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _snapshotdelete ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _disksnapshotcreate ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _disksnapshotrevert ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _resizedisk ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _updateconf ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _updatesg ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2020-07-02 23:42:10 +03:00
void _driver_cancel ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
2020-11-17 13:24:52 +03:00
/**
 *  Driver protocol handler for resize responses.
 */
void _resize ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
*
*/
2021-06-30 11:05:05 +03:00
void _log ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
F #5516: New backup interface for OpenNebula
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Neal Hansen <nhansen@opennebula.io>
co-authored-by: Daniel Clavijo Coca <dclavijo@opennebula.io>
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
BACKUP INTERFACE
=================
* Backups are exposed through a a special Datastore (BACKUP_DS) and
Image (BACKUP) types. These new types can only be used for backup'ing
up VMs. This approach allows to:
- Implement tier based backup policies (backups made on different
locations).
- Leverage access control and quota systems
- Support differnt storage and backup technologies
* Backup interface for the VMs:
- VM configures backups with BACKUP_CONFIG. This attribute can be set
in the VM template or updated with updateconf API call. It can include:
+ BACKUP_VOLATILE: To backup or not volatile disks
+ FS_FREEZE: How the FS is freeze for running VMs (qemu-agent,
suspend or none). When possible backups are crash consistent.
+ KEEP_LAST: keep only a given number of backups.
- Backups are initiated by the one.vm.backup API call that requires
the target Datastore to perform the backup (one-shot). This is
exposed by the onevm backup command.
- Backups can be periodic through scheduled actions.
- Backup configuration is updated with one.vm.updateconf API call.
* Restore interface:
- Restores are initiated by the one.image.restore API call. This is
exposed by oneimage restore command.
- Restore include configurable options for the VM template
+ NO_IP: to not preserve IP addresses (but keep the NICs and network
mapping)
+ NO_NIC: to not preserve network mappings
- Other template attributes:
+ Clean PCI devices, including network configuration in case of TYPE=NIC
attributes. By default it removes SHORT_ADDRESS and leave the "auto"
selection attributes.
+ Clean NUMA_NODE, removes node id and cpu sets. It keeps the NUMA node
- It is possible to restore single files stored in the repository by
using the backup specific URL.
* Sunstone (Ruby version) has been updated to expose this feautres.
BACKUP DRIVERS & IMPLEMENTATION
===============================
* Backup operation is implemented by a combination of 3 driver operations:
- VMM. New (internal oned <-> one_vmm_exec.rb) to orchestrate
backups for RUNNING VMs.
- TM. This commit introduces 2 new operations (and their
corresponding _live variants):
+ pre_backup(_live): Prepares the disks to be back'ed up in the
repository. It is specific to the driver: (i) ceph uses the export
operation; (ii) qcow2/raw uses snapshot-create-as and fs_freeze as
needed.
+ post_backup(_live): Performs cleanning operations, i.e. KVM
snapshots or tmp dirs.
- DATASTORE. Each backup technology is represented by its
corresponfing driver, that needs to implement:
+ backup: it takes the VM disks in file (qcow2) format and stores it
the backup repository.
+ restore: it takes a backup image and restores the associated disks
and VM template.
+ monitor: to gather available space in the repository
+ rm: to remove existing backups
+ stat: to return the "restored" size of a disk stored in a backup
+ downloader pseudo-URL handler: in the form
<backup_proto>://<driver_snapshot_id>/<disk filename>
BACKUP MANAGEMENT
=================
Backup actions may potentially take some time, leaving some vmm_exec threads in
use for a long time, stucking other vmm operations. Backups are planned
by the scheduler through the sched action interface.
Two attributes has been added to sched.conf:
* MAX_BACKUPS max active backup operations in the cloud. No more
backups will be started beyond this limit.
* MAX_BACKUPS_HOST max number of backups per host
* Fix onevm CLI to properly show and manage schedule actions. --schedule
supports now, as well as relative times +<seconds_from_stime>
onvm backup --schedule now -d 100 63
* Backup is added as VM_ADMIN_ACTIONS in oned.conf. Regular users needs
to use the batch interface or request specific permissions
Internal restructure of Scheduler:
- All sched_actions interface is now in SchedActionsXML class and files.
This class uses references to VM XML, and MUST be used in the same
lifetime scope.
- XMLRPC API calls for sched actions has been moved to ScheduledActionXML.cc as
static functions.
- VirtualMachineActionPool includes counters for active backups (total
and per host).
SUPPORTED PLATFORMS
====================
* hypervisor: KVM
* TM: qcow2/shared/ssh, ceph
* backup: restic, rsync
Notes on Ceph
* Ceph backups are performed in the following steps:
1. A snapshot of each disk is taken (group snapshots cannot be used as
it seems we cannot export the disks afterwards)
2. Disks are export to a file
3. File is converted to qcow2 format
4. Disk files are upload to the backup repo
TODO:
* Confirm crash consistent snapshots cannot be used in Ceph
TODO:
* Check if using VM dir instead of full path is better to accomodate
DS migrations i.e.:
- Current path: /var/lib/one/datastores/100/53/backup/disk.0
- Proposal: 53/backup/disk.0
RESTIC DRIVER
=============
Developed together with this feature is part of the EE edtion.
* It supports the SFTP protocol, the following attributes are
supported:
- RESTIC_SFTP_SERVER
- RESTIC_SFTP_USER: only if different from oneadmin
- RESTIC_PASSWORD
- RESTIC_IONICE: Run restic under a given ionice priority (class 2)
- RESTIC_NICE: Run restic under a given nice
- RESTIC_BWLIMIT: Limit restic upload/download BW
- RESTIC_COMPRESSION: Restic 0.14 implements compression (three modes:
off, auto, max). This requires repositories version 2. By default,
auto is used (average compression without to much CPU usage)
- RESTIC_CONNECTIONS: Sets the number of concurrent connections to a
backend (5 by default). For high-latency backends this number can be
increased.
* downloader URL: restic://<datastore_id>/<snapshot_id>/<file_name>
snapshot_id is the restic snapshot hash. To recover single disk images
from a backup. This URLs support:
- RESTIC_CONNECTIONS
- RESTIC_BWLIMIT
- RESTIC_IONICE
- RESTIC_NICE
These options needs to be defined in the associated datastore.
RSYNC DRIVER
=============
A rsync driver is included as part of the CE distribution. It uses the
rsync tool to store backups in a remote server through SSH:
* The following attributes are supported to configure the backup
datastore:
- RSYNC_HOST
- RSYNC_USER
- RSYNC_ARGS: Arguments to perform the rsync operatin (-aS by default)
* downloader URL: rsync://<ds_id>/<vmid>/<hash>/<file> can be used to recover
single files from an existing backup. (RSYNC_HOST and RSYN_USER needs
to be set in ds_id
EMULATOR_CPUS
=============
This commit includes a non related backup feature:
* Add EMULATOR_CPUS (KVM). This host (or cluster attribute) defines the
CPU IDs where the emulator threads will be pinned. If this value is
not defined the allocated CPU wll be used when using a PIN policy.
(cherry picked from commit a9e6a8e000e9a5a2f56f80ce622ad9ffc9fa032b)
F OpenNebula/one#5516: adding rsync backup driver
(cherry picked from commit fb52edf5d009dc02b071063afb97c6519b9e8305)
F OpenNebula/one#5516: update install.sh, add vmid to source, some polish
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 6fc6f8a67e435f7f92d5c40fdc3d1c825ab5581d)
F OpenNebula/one#5516: cleanup
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 12f4333b833f23098142cd4762eb9e6c505e1340)
F OpenNebula/one#5516: update downloader, default args, size check
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 510124ef2780a4e2e8c3d128c9a42945be38a305)
LL
(cherry picked from commit d4fcd134dc293f2b862086936db4d552792539fa)
2022-09-09 12:46:44 +03:00
/**
 *  Driver protocol handler for backup responses.
 */
void _backup ( std : : unique_ptr < vm_msg_t > msg ) ;
F #5989: Live update of Virtual Network attributes
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Christian González <cgonzalez@opennebula.io>
* VNET updates trigger a driver action on running VMs with NICs in the
network.
* VNET includes a sets with VM status: updated, outdated, error and
updating. With VMs in each state.
* VNET flags error situations with a new state UPDATE_FAILURE.
* The same procedure is applied when an AR is updated (only VMs in that
AR are updated).
* A new options in the one.vn.recover API call enable to recover or
retry this VM update operations.
* The following attributes can be live-updated per VNET driver:
- PHYDEV (novlan, vlan, ovs driver)
- MTU (vlan, ovs driver)
- VLAN_ID (vlan, ovs driver)
- QINQ_TYPE (ovs driver)
- CVLANS (ovs driver)
- VLAN_TAGGED_ID (ovs driver)
- OUTER_VLAN_ID (ovs driver)
- INBOUND_AVG_BW (SG, ovs driver + KVM)
- INBOUND_PEAK_BW (SG, ovs driver + KVM)
- INBOUND_PEAK_KB (SG, ovs driver + KVM)
- OUTBOUND_AVG_BW (SG, ovs driver + KVM)
- OUTBOUND_PEAK_BW (SG, ovs driver + KVM)
- OUTBOUND_PEAK_KB (SG, ovs driver + KVM)
* New API call one.vm.updatenic, allows to update individual NICs
without the need of detach/attach (only QoS supported).
* Update operations for: 802.1Q, bridge, fw, ovswitch, ovswitch_vxlan
and vxlan network drivers.
* VNET attributes (old values) stored in VNET_UPDATE to allow
implementation of update operations. The attribute is removed after a
successful update.
* Updates to CLI onevnet (--retry option) / onevm (nicupdate command)
* XSD files updated to reflect the new data model
* Ruby and JAVA bindings updated: new VNET state and recover option, new
VM API call.
* Suntone and Fireedge implementation (lease status, recover option, new
states)
TODO: Virtual Functions does not support this functionality
iii
2022-11-16 15:35:29 +03:00
/**
 *  Driver protocol handler for NIC update responses.
 */
void _updatenic ( std : : unique_ptr < vm_msg_t > msg ) ;
2020-06-29 13:14:00 +03:00
/**
 *  Logs a driver-reported error for a VM.
 *    @param vm_id pointer to the VM
 *    @param payload raw payload of the driver message
 *    @param msg error message to log
 */
void log_error ( VirtualMachine * vm_id ,
const std : : string & payload ,
2021-01-15 13:52:34 +03:00
const std : : string & msg ) ;
2020-06-29 13:14:00 +03:00
/**
 *  Logs a driver-reported error for a VM, by VM id.
 *    @param vm_id id of the VM
 *    @param payload raw payload of the driver message
 *    @param msg error message to log
 */
2021-01-15 13:52:34 +03:00
void log_error ( int vm_id ,
const std : : string & payload ,
const std : : string & msg ) ;
2020-06-29 13:14:00 +03:00
/**
 *  Checks a VM against an incoming driver message.
 *  NOTE(review): presumably verifies the VM is in a state where the
 *  message should be processed — confirm in the implementation.
 *    @param vm_id id of the VM
 *    @param msg the driver message
 *    @return true if the message should be processed
 */
bool check_vm_state ( int vm_id , vm_msg_t * msg ) ;
2008-06-17 20:27:32 +04:00
2017-02-03 16:19:15 +03:00
// -------------------------------------------------------------------------
// Action Listener interface
// -------------------------------------------------------------------------
2020-06-29 13:14:00 +03:00
static const int drivers_timeout = 10 ;
2020-07-24 17:00:59 +03:00
void finalize_action ( )
2017-02-03 16:19:15 +03:00
{
2020-06-29 13:14:00 +03:00
DriverManager : : stop ( drivers_timeout ) ;
2017-02-03 16:19:15 +03:00
} ;
2011-11-10 14:15:58 +04:00
/**
* Function to format a VMM Driver message in the form :
* < VMM_DRIVER_ACTION_DATA >
* < HOST > hostname < / HOST >
* < MIGR_HOST > m_hostname < / MIGR_HOST >
* < DOMAIN > domain_id < / DOMAIN >
* < DEPLOYMENT_FILE > dfile < / DEPLOYMENT_FILE >
* < CHECKPOINT_FILE > cfile < / CHECKPOINT_FILE >
2011-11-11 03:45:33 +04:00
* < VM >
2011-11-10 14:15:58 +04:00
* VM representation in XML
2011-11-11 03:45:33 +04:00
* < / VM >
2015-10-29 03:58:35 +03:00
* < DATASTORE >
* System DS information in XML
* < / DATASTORE >
2011-11-10 14:15:58 +04:00
* < / VMM_DRIVER_ACTION_DATA >
*
* @ param hostname of the host to perform the action
* @ param m_hostname name of the host to migrate the VM
* @ param domain domain id as returned by the hypervisor
* @ param dfile deployment file to boot the VM
* @ param cfile checkpoint file to save the VM
2012-06-13 20:42:42 +04:00
* @ param disk_id Disk to attach / detach , if any
* @ param tm_command Transfer Manager command to attach / detach , if any
2013-11-10 23:40:04 +04:00
* @ param tm_command_rollback TM command in case of attach failure
2012-06-13 20:42:42 +04:00
* @ param disk_target_path Path of the disk to attach , if any
2011-11-10 14:15:58 +04:00
* @ param tmpl the VM information in XML
2015-10-29 03:58:35 +03:00
* @ param ds_id of the system datastore
2016-03-02 01:31:31 +03:00
* @ param id of the security group
2011-11-10 14:15:58 +04:00
*/
2020-07-05 23:01:32 +03:00
std : : string format_message (
2020-07-02 23:42:10 +03:00
const std : : string & hostname ,
const std : : string & m_hostname ,
const std : : string & domain ,
const std : : string & ldfile ,
const std : : string & rdfile ,
const std : : string & cfile ,
const std : : string & tm_command ,
const std : : string & tm_command_rollback ,
const std : : string & disk_target_path ,
const std : : string & tmpl ,
2016-03-02 01:31:31 +03:00
int ds_id ,
F #5989: Live update of Virtual Network attributes
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Christian González <cgonzalez@opennebula.io>
* VNET updates trigger a driver action on running VMs with NICs in the
network.
* VNET includes a sets with VM status: updated, outdated, error and
updating. With VMs in each state.
* VNET flags error situations with a new state UPDATE_FAILURE.
* The same procedure is applied when an AR is updated (only VMs in that
AR are updated).
* A new options in the one.vn.recover API call enable to recover or
retry this VM update operations.
* The following attributes can be live-updated per VNET driver:
- PHYDEV (novlan, vlan, ovs driver)
- MTU (vlan, ovs driver)
- VLAN_ID (vlan, ovs driver)
- QINQ_TYPE (ovs driver)
- CVLANS (ovs driver)
- VLAN_TAGGED_ID (ovs driver)
- OUTER_VLAN_ID (ovs driver)
- INBOUND_AVG_BW (SG, ovs driver + KVM)
- INBOUND_PEAK_BW (SG, ovs driver + KVM)
- INBOUND_PEAK_KB (SG, ovs driver + KVM)
- OUTBOUND_AVG_BW (SG, ovs driver + KVM)
- OUTBOUND_PEAK_BW (SG, ovs driver + KVM)
- OUTBOUND_PEAK_KB (SG, ovs driver + KVM)
* New API call one.vm.updatenic, allows to update individual NICs
without the need of detach/attach (only QoS supported).
* Update operations for: 802.1Q, bridge, fw, ovswitch, ovswitch_vxlan
and vxlan network drivers.
* VNET attributes (old values) stored in VNET_UPDATE to allow
implementation of update operations. The attribute is removed after a
successful update.
* Updates to CLI onevnet (--retry option) / onevm (nicupdate command)
* XSD files updated to reflect the new data model
* Ruby and JAVA bindings updated: new VNET state and recover option, new
VM API call.
* Suntone and Fireedge implementation (lease status, recover option, new
states)
TODO: Virtual Functions does not support this functionality
iii
2022-11-16 15:35:29 +03:00
int sgid = - 1 ,
int nicid = - 1 ) ;
2012-06-19 17:26:22 +04:00
2020-07-24 17:00:59 +03:00
public :
2008-06-17 20:27:32 +04:00
/**
* Function executed when a DEPLOY action is received . It deploys a VM on
* a Host .
* @ param vid the id of the VM to be deployed .
*/
2020-07-24 17:00:59 +03:00
void trigger_deploy ( int vid ) ;
2008-06-17 20:27:32 +04:00
/**
2012-06-19 17:26:22 +04:00
* Function to stop a running VM and generate a checkpoint file . This
2008-06-17 20:27:32 +04:00
* function is executed when a SAVE action is triggered .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_save ( int vid ) ;
2008-06-17 20:27:32 +04:00
/**
* Shutdowns a VM when a SHUTDOWN action is received .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_shutdown ( int vid ) ;
2008-06-17 20:27:32 +04:00
/**
* Cancels a VM when a CANCEL action is received .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_cancel ( int vid ) ;
2008-06-17 20:27:32 +04:00
2009-07-09 18:34:34 +04:00
/**
* Cancels a VM ( in the previous host ) when a CANCEL action is received .
* Note that the domain - id is the last one returned by a boot action
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_cancel_previous ( int vid ) ;
2009-07-09 18:34:34 +04:00
2013-01-21 03:15:46 +04:00
/**
* Cleanups a host ( cancel VM + delete disk images ) .
* @ param vid the id of the VM .
2013-01-21 15:27:18 +04:00
* @ param cancel_previous if true the VM will be canceled in the previous
* host ( only relevant to delete VM ' s in MIGRATE state )
2013-01-21 03:15:46 +04:00
*/
2020-07-24 17:00:59 +03:00
void trigger_cleanup ( int vid , bool cancel_previous ) ;
2013-01-21 03:15:46 +04:00
2013-01-21 15:27:18 +04:00
/**
* Cleanups the previous host ( cancel VM + delete disk images ) .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_cleanup_previous ( int vid ) ;
2013-01-21 15:27:18 +04:00
2008-06-17 20:27:32 +04:00
/**
* Function to migrate ( live ) a VM ( MIGRATE action ) .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_migrate ( int vid ) ;
2008-06-17 20:27:32 +04:00
/**
* Restores a VM from a checkpoint file .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_restore ( int vid ) ;
2008-06-22 05:51:49 +04:00
2011-12-26 02:46:19 +04:00
/**
* Reboots a running VM .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_reboot ( int vid ) ;
2012-06-19 17:26:22 +04:00
2012-05-09 00:33:59 +04:00
/**
* Resets a running VM .
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_reset ( int vid ) ;
2011-12-26 02:46:19 +04:00
2012-06-13 20:42:42 +04:00
/**
* Attaches a new disk to a VM . The VM must have a disk with the
* attribute ATTACH = YES
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_attach ( int vid ) ;
2012-06-13 20:42:42 +04:00
2012-06-14 19:45:41 +04:00
/**
* Detaches a disk from a VM . The VM must have a disk with the
* attribute ATTACH = YES
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_detach ( int vid ) ;
2012-06-14 19:45:41 +04:00
2012-12-12 21:31:27 +04:00
/**
* Attaches a new NIC to a VM . The VM must have a NIC with the
* attribute ATTACH = YES
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_attach_nic ( int vid ) ;
2012-12-12 21:31:27 +04:00
/**
* Detaches a NIC from a VM . The VM must have a NIC with the
* attribute ATTACH = YES
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_detach_nic ( int vid ) ;
2012-12-12 21:31:27 +04:00
2016-12-17 21:30:11 +03:00
/**
* This function cancels the current driver operation
*/
2020-07-24 17:00:59 +03:00
void trigger_driver_cancel ( int vid ) ;
2016-12-17 21:30:11 +03:00
2013-02-20 19:04:09 +04:00
/**
* Creates a new system snapshot . The VM must have a snapshot with the
* attribute ACTIVE = YES
*
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_snapshot_create ( int vid ) ;
2013-02-20 19:04:09 +04:00
/**
* Reverts to a snapshot . The VM must have a snapshot with the
* attribute ACTIVE = YES
*
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_snapshot_revert ( int vid ) ;
2013-02-20 19:04:09 +04:00
2013-02-21 18:01:48 +04:00
/**
* Deletes a snapshot . The VM must have a snapshot with the
* attribute ACTIVE = YES
*
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_snapshot_delete ( int vid ) ;
2013-02-21 18:01:48 +04:00
2015-07-01 14:37:58 +03:00
/**
* Creates a new disk system snapshot .
*
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_disk_snapshot_create ( int vid ) ;
2015-07-01 14:37:58 +03:00
2009-07-09 18:34:34 +04:00
/**
2016-12-17 21:30:11 +03:00
* Resize a VM disk
*
* @ param vid the id of the VM .
2009-07-09 18:34:34 +04:00
*/
2020-07-24 17:00:59 +03:00
void trigger_disk_resize ( int vid ) ;
2019-07-26 14:45:26 +03:00
/**
* Update VM context
*
* @ param vid the id of the VM .
*/
2020-07-24 17:00:59 +03:00
void trigger_update_conf ( int vid ) ;
2020-11-17 13:24:52 +03:00
/**
* Resizes the VM. NOTE(review): previous comment said "Update VM
* context" — a copy-paste of trigger_update_conf; confirm the exact
* resize semantics (capacity change) against the driver implementation.
*
* @param vid the id of the VM.
*/
void trigger_resize ( int vid ) ;
F #5516: New backup interface for OpenNebula
co-authored-by: Frederick Borges <fborges@opennebula.io>
co-authored-by: Neal Hansen <nhansen@opennebula.io>
co-authored-by: Daniel Clavijo Coca <dclavijo@opennebula.io>
co-authored-by: Pavel Czerný <pczerny@opennebula.systems>
BACKUP INTERFACE
=================
* Backups are exposed through a special Datastore (BACKUP_DS) and
Image (BACKUP) types. These new types can only be used for backing
up VMs. This approach allows to:
- Implement tier based backup policies (backups made on different
locations).
- Leverage access control and quota systems
- Support different storage and backup technologies
* Backup interface for the VMs:
- VM configures backups with BACKUP_CONFIG. This attribute can be set
in the VM template or updated with updateconf API call. It can include:
+ BACKUP_VOLATILE: To backup or not volatile disks
+ FS_FREEZE: How the FS is freeze for running VMs (qemu-agent,
suspend or none). When possible backups are crash consistent.
+ KEEP_LAST: keep only a given number of backups.
- Backups are initiated by the one.vm.backup API call that requires
the target Datastore to perform the backup (one-shot). This is
exposed by the onevm backup command.
- Backups can be periodic through scheduled actions.
- Backup configuration is updated with one.vm.updateconf API call.
* Restore interface:
- Restores are initiated by the one.image.restore API call. This is
exposed by oneimage restore command.
- Restore include configurable options for the VM template
+ NO_IP: to not preserve IP addresses (but keep the NICs and network
mapping)
+ NO_NIC: to not preserve network mappings
- Other template attributes:
+ Clean PCI devices, including network configuration in case of TYPE=NIC
attributes. By default it removes SHORT_ADDRESS and leave the "auto"
selection attributes.
+ Clean NUMA_NODE, removes node id and cpu sets. It keeps the NUMA node
- It is possible to restore single files stored in the repository by
using the backup specific URL.
* Sunstone (Ruby version) has been updated to expose these features.
BACKUP DRIVERS & IMPLEMENTATION
===============================
* Backup operation is implemented by a combination of 3 driver operations:
- VMM. New (internal oned <-> one_vmm_exec.rb) to orchestrate
backups for RUNNING VMs.
- TM. This commit introduces 2 new operations (and their
corresponding _live variants):
+ pre_backup(_live): Prepares the disks to be back'ed up in the
repository. It is specific to the driver: (i) ceph uses the export
operation; (ii) qcow2/raw uses snapshot-create-as and fs_freeze as
needed.
+ post_backup(_live): Performs cleaning operations, i.e. KVM
snapshots or tmp dirs.
- DATASTORE. Each backup technology is represented by its
corresponding driver, that needs to implement:
+ backup: it takes the VM disks in file (qcow2) format and stores it
the backup repository.
+ restore: it takes a backup image and restores the associated disks
and VM template.
+ monitor: to gather available space in the repository
+ rm: to remove existing backups
+ stat: to return the "restored" size of a disk stored in a backup
+ downloader pseudo-URL handler: in the form
<backup_proto>://<driver_snapshot_id>/<disk filename>
BACKUP MANAGEMENT
=================
Backup actions may potentially take some time, leaving some vmm_exec threads in
use for a long time, stucking other vmm operations. Backups are planned
by the scheduler through the sched action interface.
Two attributes have been added to sched.conf:
* MAX_BACKUPS max active backup operations in the cloud. No more
backups will be started beyond this limit.
* MAX_BACKUPS_HOST max number of backups per host
* Fix onevm CLI to properly show and manage schedule actions. --schedule
supports now, as well as relative times +<seconds_from_stime>
onvm backup --schedule now -d 100 63
* Backup is added as VM_ADMIN_ACTIONS in oned.conf. Regular users needs
to use the batch interface or request specific permissions
Internal restructure of Scheduler:
- All sched_actions interface is now in SchedActionsXML class and files.
This class uses references to VM XML, and MUST be used in the same
lifetime scope.
- XMLRPC API calls for sched actions has been moved to ScheduledActionXML.cc as
static functions.
- VirtualMachineActionPool includes counters for active backups (total
and per host).
SUPPORTED PLATFORMS
====================
* hypervisor: KVM
* TM: qcow2/shared/ssh, ceph
* backup: restic, rsync
Notes on Ceph
* Ceph backups are performed in the following steps:
1. A snapshot of each disk is taken (group snapshots cannot be used as
it seems we cannot export the disks afterwards)
2. Disks are exported to a file
3. File is converted to qcow2 format
4. Disk files are uploaded to the backup repo
TODO:
* Confirm crash consistent snapshots cannot be used in Ceph
TODO:
* Check if using VM dir instead of full path is better to accommodate
DS migrations i.e.:
- Current path: /var/lib/one/datastores/100/53/backup/disk.0
- Proposal: 53/backup/disk.0
RESTIC DRIVER
=============
Developed together with this feature; it is part of the EE edition.
* It supports the SFTP protocol, the following attributes are
supported:
- RESTIC_SFTP_SERVER
- RESTIC_SFTP_USER: only if different from oneadmin
- RESTIC_PASSWORD
- RESTIC_IONICE: Run restic under a given ionice priority (class 2)
- RESTIC_NICE: Run restic under a given nice
- RESTIC_BWLIMIT: Limit restic upload/download BW
- RESTIC_COMPRESSION: Restic 0.14 implements compression (three modes:
off, auto, max). This requires repositories version 2. By default,
auto is used (average compression without to much CPU usage)
- RESTIC_CONNECTIONS: Sets the number of concurrent connections to a
backend (5 by default). For high-latency backends this number can be
increased.
* downloader URL: restic://<datastore_id>/<snapshot_id>/<file_name>
snapshot_id is the restic snapshot hash. To recover single disk images
from a backup. This URLs support:
- RESTIC_CONNECTIONS
- RESTIC_BWLIMIT
- RESTIC_IONICE
- RESTIC_NICE
These options needs to be defined in the associated datastore.
RSYNC DRIVER
=============
A rsync driver is included as part of the CE distribution. It uses the
rsync tool to store backups in a remote server through SSH:
* The following attributes are supported to configure the backup
datastore:
- RSYNC_HOST
- RSYNC_USER
- RSYNC_ARGS: Arguments to perform the rsync operation (-aS by default)
* downloader URL: rsync://<ds_id>/<vmid>/<hash>/<file> can be used to recover
single files from an existing backup. (RSYNC_HOST and RSYNC_USER need
to be set in the ds_id datastore)
EMULATOR_CPUS
=============
This commit includes a non related backup feature:
* Add EMULATOR_CPUS (KVM). This host (or cluster) attribute defines the
CPU IDs where the emulator threads will be pinned. If this value is
not defined the allocated CPU will be used when using a PIN policy.
(cherry picked from commit a9e6a8e000e9a5a2f56f80ce622ad9ffc9fa032b)
F OpenNebula/one#5516: adding rsync backup driver
(cherry picked from commit fb52edf5d009dc02b071063afb97c6519b9e8305)
F OpenNebula/one#5516: update install.sh, add vmid to source, some polish
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 6fc6f8a67e435f7f92d5c40fdc3d1c825ab5581d)
F OpenNebula/one#5516: cleanup
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 12f4333b833f23098142cd4762eb9e6c505e1340)
F OpenNebula/one#5516: update downloader, default args, size check
Signed-off-by: Neal Hansen <nhansen@opennebula.io>
(cherry picked from commit 510124ef2780a4e2e8c3d128c9a42945be38a305)
LL
(cherry picked from commit d4fcd134dc293f2b862086936db4d552792539fa)
2022-09-09 12:46:44 +03:00
/**
* Creates a backup for the VM.
*
* @param vid the id of the VM.
*/
void trigger_backup ( int vid ) ;
2008-06-17 20:27:32 +04:00
} ;
# endif /*VIRTUAL_MACHINE_MANAGER_H_*/