/* -------------------------------------------------------------------------- */
/* Copyright 2002-2023, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef VIRTUAL_MACHINE_MANAGER_DRIVER_H_
#define VIRTUAL_MACHINE_MANAGER_DRIVER_H_
#include <map>
#include <string>
#include <sstream>
#include "ProtocolMessages.h"
#include "Driver.h"
#include "ActionSet.h"
#include "VMActions.h"
#include "Host.h"
#include "Cluster.h"
#include "VirtualMachine.h"
#include "NebulaLog.h"
/**
* VirtualMachineManagerDriver provides a base class to implement VM Manager
* Drivers. This class implements the protocol and recover functions
* from the Mad interface. Classes derived from the VirtualMachineManagerDriver
* must implement the deployment function to generate specific VM
* deployment information for the underlying MAD.
*/
class VirtualMachineManagerDriver : public Driver<vm_msg_t>
{
public:
VirtualMachineManagerDriver(const std::string& mad_location,
const std::map<std::string,std::string>& attrs);
virtual ~VirtualMachineManagerDriver() = default;
/**
* Generates a driver-specific deployment file:
* @param vm pointer to a virtual machine
* @param file_name to generate the deployment description
* @return 0 on success
*/
virtual int deployment_description(
const VirtualMachine * vm,
const std::string& file_name) const = 0;
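/* Illustrative sketch (not part of this header): a hypothetical ExampleDriver
 * only needs to render the deployment file for its hypervisor. The class name,
 * the file layout written below and the use of <fstream> are assumptions for
 * illustration, not OpenNebula code:
 *
 *   class ExampleDriver : public VirtualMachineManagerDriver
 *   {
 *   public:
 *       using VirtualMachineManagerDriver::VirtualMachineManagerDriver;
 *
 *       int deployment_description(const VirtualMachine * vm,
 *                                  const std::string& file_name) const override
 *       {
 *           std::ofstream file(file_name);
 *
 *           if (!file.good())
 *           {
 *               return -1;
 *           }
 *
 *           // hypervisor-specific rendering of the VM definition
 *           file << "NAME = " << vm->get_name() << "\n";
 *
 *           return 0;
 *       }
 *   };
 */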
/**
* Validates the RAW section of the VM template
* @param raw the RAW section of the VM
* @param error description on error
* @return 0 on success
*/
virtual int validate_raw(const std::string& raw, std::string& error) const
{
return 0;
}
/**
* Validates driver-specific attributes in the VM Template
* @param vm pointer to the Virtual Machine
* @param hid id of the Host where the VM will run
* @param cluster_id id of the Cluster of that Host
* @param error description on error
* @return 0 on success
*/
virtual int validate_template(const VirtualMachine* vm, int hid, int cluster_id,
std::string& error) const
{
return 0;
}
/**
* Check if action is supported for imported VMs
* @param action
* @return True if it is supported
*/
bool is_imported_action_supported(VMActions::Action action) const
{
return imported_actions.is_set(action);
}
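/* Usage sketch (illustrative; assumes an action constant defined in
 * VMActions.h, e.g. a poweroff action):
 *
 *   if ( !is_imported_action_supported(VMActions::POWEROFF_ACTION) )
 *   {
 *       // reject the operation for an imported VM
 *   }
 */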
/**
* @return true if system snapshots are preserved
*/
bool is_keep_snapshots() const
{
return keep_snapshots;
}
/**
* @return true if live migration between datastores is supported
*/
bool is_ds_live_migration() const
{
return ds_live_migration;
}
/**
* @return true if cold NIC attach/detach is enabled
*/
bool is_cold_nic_attach() const
{
return cold_nic_attach;
}
/**
* @return true if hotplug of VCPU and memory is supported
*/
bool is_live_resize() const
{
return live_resize;
}
/**
* @return true if shareable disks are supported
*/
bool support_shareable() const
{
return support_shareable_;
}
protected:
/**
* Gets a configuration attr from driver configuration file (single
* version)
* @param name of config attribute
* @param value of the attribute
*/
template<typename T>
void get_default(const std::string& name, T& value) const
{
driver_conf.get(name, value);
}
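/* Usage sketch (illustrative; the attribute name EMULATOR is an assumption,
 * any attribute defined in the driver configuration file works):
 *
 *   std::string emulator;
 *
 *   get_default("EMULATOR", emulator); // filled if EMULATOR is configured
 */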
/**
* Gets a configuration attr from driver configuration file (vector
* version)
* @param name of config vector attribute for the domain
* @param vname of the attribute
* @param value of the attribute
* @return 0 on success, -1 otherwise
*/
template<typename T>
int get_default(const char* name, const char* vname, T& value) const
{
const VectorAttribute * vattr = driver_conf.get(name);
if (vattr == 0)
{
return -1;
}
return vattr->vector_value(vname, value);
}
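/* Usage sketch (illustrative names; assumes the driver configuration file
 * defines a vector attribute such as OS = [ ARCH = "x86_64" ]):
 *
 *   std::string arch;
 *
 *   if ( get_default("OS", "ARCH", arch) == 0 )
 *   {
 *       // arch holds the configured default architecture
 *   }
 */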
/**
* Gets a configuration attribute (single version)
* priority VM > host > cluster > config_file
* @param vm pointer to Virtual Machine
* @param host pointer to Host
* @param cluster pointer to Cluster
* @param name of config attribute
* @param value of the attribute
* @return true if the attribute was found, false otherwise
*/
template<typename T>
bool get_attribute(const VirtualMachine * vm,
const Host * host,
const Cluster * cluster,
const std::string& name,
T& value) const
{
// Get value from VM
if (vm && vm->get_template_attribute(name, value))
{
return true;
}
// Get value from host
if (host && host->get_template_attribute(name, value))
{
return true;
}
// Get value from cluster
if (cluster && cluster->get_template_attribute(name, value))
{
return true;
}
return driver_conf.get(name, value);
}
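/* Usage sketch: resolve an attribute following the documented priority
 * (VM > Host > Cluster > driver configuration file). The attribute name and
 * the vm/host/cluster pointers are assumed to exist in the calling context:
 *
 *   int timeout = 0;
 *
 *   if ( get_attribute(vm, host, cluster, "SHUTDOWN_TIMEOUT", timeout) )
 *   {
 *       // value taken from the most specific object that defines it
 *   }
 */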
/**
* Gets a configuration attribute (vector version)
* priority VM > host > cluster > config_file
* @param vm pointer to Virtual Machine
* @param host pointer to Host
* @param cluster pointer to Cluster
* @param name of config vector attribute for the domain
* @param vname of the attribute
* @param value of the attribute
* @return true if the attribute was found, false otherwise
*/
template<typename T>
bool get_attribute(const VirtualMachine * vm,
const Host * host,
const Cluster * cluster,
const std::string& name,
const std::string& vname,
T& value) const
{
const VectorAttribute * vattr;
// Get value from VM
if (vm)
{
vattr = vm->get_template_attribute(name);
if (vattr && vattr->vector_value(vname, value) == 0)
{
return true;
}
}
// Get value from host
if (host)
{
vattr = host->get_template_attribute(name);
if (vattr && vattr->vector_value(vname, value) == 0)
{
return true;
}
}
// Get value from cluster
if (cluster)
{
vattr = cluster->get_template_attribute(name);
if (vattr && vattr->vector_value(vname, value) == 0)
{
return true;
}
}
vattr = driver_conf.get(name);
if (vattr && vattr->vector_value(vname, value) == 0)
{
return true;
}
return false;
}
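/* Usage sketch (illustrative): read a sub-attribute of a vector attribute,
 * e.g. RAW/TYPE, with the same VM > Host > Cluster > configuration file
 * priority; names are assumptions for illustration:
 *
 *   std::string raw_type;
 *
 *   if ( get_attribute(vm, host, cluster, "RAW", "TYPE", raw_type) )
 *   {
 *       // raw_type holds the first TYPE value found in priority order
 *   }
 */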
private:
friend class VirtualMachineManager;
static const std::string imported_actions_default;
static const std::string imported_actions_default_public;
/**
* Configuration file for the driver
*/
Template driver_conf;
/**
* List of available actions for imported VMs. Each bit is an action
* as defined in VMActions.h, 1=supported and 0=not supported
*/
ActionSet<VMActions::Action> imported_actions;
/**
* Set to true if the hypervisor can keep system snapshots across
* create/delete cycles and live migrations.
*/
bool keep_snapshots;
/**
* Set to true if live migration between datastores is allowed.
*/
bool ds_live_migration;
/**
* Set to true if cold NIC attach/detach invokes the network driver scripts (pre, post, clean)
*/
bool cold_nic_attach;
/**
* Set to true if hypervisor supports hotplug vcpu and memory
*/
bool live_resize;
/**
* Set to true if hypervisor supports shareable disks
*/
bool support_shareable_;
/**
* Sends a deploy request to the MAD: "DEPLOY ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void deploy(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::DEPLOY, oid, drv_msg);
}
/**
* Sends a shutdown request to the MAD: "SHUTDOWN ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void shutdown(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::SHUTDOWN, oid, drv_msg);
}
/**
* Sends a reset request to the MAD: "RESET ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void reset(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::RESET, oid, drv_msg);
}
/**
* Sends a reboot request to the MAD: "REBOOT ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void reboot(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::REBOOT, oid, drv_msg);
}
/**
* Sends a cancel request to the MAD: "CANCEL ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void cancel(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::CANCEL, oid, drv_msg);
}
/**
* Sends a cleanup request to the MAD: "CLEANUP ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void cleanup(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::CLEANUP, oid, drv_msg);
}
/**
* Sends a checkpoint request to the MAD: "CHECKPOINT ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void checkpoint(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::CHECKPOINT, oid, drv_msg);
}
/**
* Sends a save request to the MAD: "SAVE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void save(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::SAVE, oid, drv_msg);
}
/**
* Sends a driver cancel request to the MAD to abort the active operation
* @param oid the virtual machine id.
*/
void driver_cancel(const int oid) const
{
write_drv(VMManagerMessages::DRIVER_CANCEL, oid, "");
}
/**
* Sends a restore request to the MAD: "RESTORE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void restore(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::RESTORE, oid, drv_msg);
}
/**
* Sends a migrate request to the MAD: "MIGRATE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void migrate(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::MIGRATE, oid, drv_msg);
}
/**
* Sends an attach request to the MAD: "ATTACHDISK ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void attach(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::ATTACHDISK, oid, drv_msg);
}
/**
* Sends a detach request to the MAD: "DETACHDISK ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void detach(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::DETACHDISK, oid, drv_msg);
}
/**
* Sends an attach NIC request to the MAD: "ATTACHNIC ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void attach_nic(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::ATTACHNIC, oid, drv_msg);
}
/**
* Sends a detach request to the MAD: "DETACHNIC ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void detach_nic(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::DETACHNIC, oid, drv_msg);
}
/**
* Sends a snapshot create request to the MAD:
* "SNAPSHOTCREATE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void snapshot_create(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::SNAPSHOTCREATE, oid, drv_msg);
}
/**
* Sends a snapshot revert request to the MAD:
* "SNAPSHOTREVERT ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void snapshot_revert(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::SNAPSHOTREVERT, oid, drv_msg);
}
/**
* Sends a snapshot delete request to the MAD:
* "SNAPSHOTDELETE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void snapshot_delete(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::SNAPSHOTDELETE, oid, drv_msg);
}
/**
* Sends a disk snapshot create request to the MAD:
* "DISKSNAPSHOTCREATE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void disk_snapshot_create(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::DISKSNAPSHOTCREATE, oid, drv_msg);
}
/**
* Sends a disk resize request to the MAD:
* "RESIZE ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void disk_resize(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::RESIZEDISK, oid, drv_msg);
}
/**
* Sends an updateconf request to the MAD: "UPDATECONF ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void update_conf(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::UPDATECONF, oid, drv_msg);
}
/**
* Sends a request to update the VM security groups:
* "UPDATESG ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void updatesg(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::UPDATESG, oid, drv_msg);
}
/**
* Sends a backup create request to the MAD:
* "BACKUP ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void backup(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::BACKUP, oid, drv_msg);
}
/**
* Sends a request to update the VM nic:
* "UPDATENIC ID XML_DRV_MSG"
* @param oid the virtual machine id.
* @param drv_msg xml data for the mad operation
*/
void updatenic(
const int oid,
const std::string& drv_msg) const
{
write_drv(VMManagerMessages::UPDATENIC, oid, drv_msg);
}
/**
* Writes a driver message of the given type for the VM to the driver
* @param type of the driver message
* @param oid the virtual machine id.
* @param msg xml data for the mad operation
*/
void write_drv(VMManagerMessages type,
const int oid,
const std::string& msg) const
{
vm_msg_t drv_msg(type, "", oid, msg);
write(drv_msg);
}
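/* All the protocol helpers above delegate to write_drv, which builds a
 * vm_msg_t and sends it through the Driver base class. A minimal equivalent
 * of one of those calls (message type and payload are illustrative):
 *
 *   vm_msg_t msg(VMManagerMessages::DEPLOY, "", oid, drv_msg);
 *
 *   write(msg);
 */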
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
#endif /*VIRTUAL_MACHINE_MANAGER_DRIVER_H_*/