1
0
mirror of https://github.com/OpenNebula/one.git synced 2024-12-22 13:33:52 +03:00

F #5940: Improve PCI Passthrough and SRIOV support

- SHORT_ADDRESS can be used to select specific devices (useful on
  homogeneous clusters or NIC attach operations). This name has been
  selected because:
    1. It is the attribute shown in host info
    2. It does not collide with the reserved ADDRESS attribute

- New test and add functions that consider both allocation methods: by
  name (VENDOR/CLASS/DEVICE) or address (SHORT_ADDRESS)

- Parameter check on VM creation

- revert and add method use the same pci_attribute function to add info
  to the VM PCI attribute

- Remove well-known attributes when parsing PCI devices (ADDRESS,
  PREV_ADDRESS, BUS, FUNCTION, SLOT, NUMA_NODE, UUID)

- Support for attach and detach NIC with PCI attributes
    * onevm_exec.rb looks for PCI devices for ATTACH=YES when attaching/detaching an interface
    * script action are now written in Ruby
    * KVM module with common actions (hostdev/interface device str)
    * Minor changes in xmlparser and OpenNebulaVM classes

- PCI selection options to onevm nic-attach:
    * pci short_address
    * pci_device device ID
    * pci_vendor vendor ID
    * pci_class class ID

- VF can be configured by setting some parameters through IP link (e.g.
  MAC or VLAN_ID). This commit includes a mixin to activate_vf.

    * one_vmm_exec.rb looks for PCI VN_MAD drivers
    * VM class (VNM) adds a @pcis array
    * activate_vf should be called in the pre stage. The following drivers
      activate VFs (VLAN_ID is implemented as an 802.1Q tag)
            - 802.1Q
            - bridge
            - fw
            - ovswitch

- Improve integration with Libvirt/QEMU:

    * When attach, only activate the VF being attached
    * Attach: Use <interface> and not <hostdev> for VF. There seems to be a race
      condition between accessing the vfio device and permission setup.
    * Attach: Remove address on attach as it may fail because the PCI controller
      is not present, e.g.:

        ATTACHNIC: Could not attach NIC to 28534240: error: Failed to attach device
        from /dev/fd/63 error: XML error: Invalid PCI address 0000:01:01.0.
        Only PCI buses up to 0 are available ExitCode: 1

    * Detach: Detach always uses <hostdev> as libvirt fails to identify the
      device just by address when using <interface>
This commit is contained in:
Ruben S. Montero 2022-08-11 01:46:57 +02:00
parent f3f28e3227
commit 907f26050f
No known key found for this signature in database
GPG Key ID: A0CEA6FA880A1D87
29 changed files with 1218 additions and 301 deletions

View File

@ -251,6 +251,11 @@ public:
}
};
/**
 * Adds the VM PCI devices to the host share
 * @param sr the capacity request of the VM
 * @return true if the devices were added
 *
 * NOTE(review): the underlying HostShare::add_pci performs no rollback
 * on partial failure — confirm callers handle this
 */
bool add_pci(HostShareCapacity &sr)
{
return host_share.add_pci(sr);
}
/**
* Deletes a new VM to the host share by incrementing usage counters
* @param sr the capacity request of the VM
@ -271,6 +276,11 @@ public:
}
};
/**
 * Removes the VM PCI devices from the host share
 * @param sr the capacity request of the VM
 */
void del_pci(HostShareCapacity& sr)
{
host_share.del_pci(sr);
}
/**
* Revert changes in PCI Devices after migrate failure
* @param sr host share capacity info

View File

@ -65,12 +65,23 @@ public:
*/
void add(HostShareCapacity &sr);
/**
 * Allocates the requested PCI devices in this share to the VM
 * @param sr capacity request holding the PCI device list and the VM id
 * @return true if all the devices were added
 */
bool add_pci(HostShareCapacity &sr)
{
// NOTE THIS FUNCTION DOES NOT PERFORM ANY ROLLBACK
return pci.add(sr.pci, sr.vmid);
}
/**
* Delete VM capacity from this share
* @param sr requested capacity by the VM
*/
void del(HostShareCapacity &sr);
/**
 * Releases the PCI devices used by the VM from this share
 * @param sr capacity request holding the PCI device list and the VM id
 */
void del_pci(HostShareCapacity &sr)
{
pci.del(sr.pci, sr.vmid);
}
/**
* Revert changes in PCI Devices
* @param sr capacity info by the VM

View File

@ -78,8 +78,12 @@ public:
* @param devs list of requested PCI devices, will include address of
* assigned devices.
* @param vmid of the VM
*
* @return true if the devices were added
*
* NOTE THIS FUNCTION DOES NOT PERFORM ANY ROLLBACK
*/
void add(std::vector<VectorAttribute *> &devs, int vmid);
bool add(std::vector<VectorAttribute *> &devs, int vmid);
/**
* Remove the VM assignment from the PCI device list
@ -87,7 +91,8 @@ public:
void del(const std::vector<VectorAttribute *> &devs, int vmid);
/**
* Revert the VM assignment from the PCI device list
* Revert the VM assignment from the PCI device list. It copies
* back the attributes from the previous PCI device
*/
void revert(std::vector<VectorAttribute *> &devs);
@ -125,19 +130,21 @@ public:
* - VM_SLOT: PCI_ID + 1
* - VM_FUNCTION: 0
* - VM_ADDRESS: BUS:SLOT.0
*
* Cleans internal attributes:
* - NUMA_NODE
* - UUID
* - BUS, SLOT, FUNCTION
* - ADDRESS, PREV_ADDRESS
* @param pci_device to set the address in
* @param default_bus if not set in PCI attribute (PCI_PASSTHROUGH_BUS
* in oned.conf)
* @return -1 if wrong bus 0 on success
*/
static int set_pci_address(VectorAttribute * pci_device, const std::string& dbus);
static int set_pci_address(VectorAttribute * pci_device, const std::string& dbus,
bool clean);
private:
/**
* Sets the internal class structures from the template
*/
void init();
/**
* Internal structure to represent PCI devices for fast look up and
* update
@ -162,6 +169,58 @@ private:
};
std::map<std::string, PCIDevice *> pci_devices;
/**
* Sets the internal class structures from the template
*/
void init();
/**
* Test if there is a suitable PCI device for the VM request. The test
* is done using the VENDOR/DEVICE/CLASS attributes
*
* @param dev VM attribute that represents the device request
* @param addrs PCI addresses that should be considered in use
*
* @return true if the device can be allocated to this host
*/
bool test_by_name(const VectorAttribute *dev, std::set<std::string>& addrs) const;
/**
* Test if there is a suitable PCI device for the VM request. The test
* is done using a specific address
*
* @param device VM attribute that represents the device request
* @param addr the requested address
*
* @return PCI_ID of the tested device or -1 if no PCI found
*/
bool test_by_addr(const VectorAttribute *dev, const std::string& addr) const;
/**
* Allocates the given VM device using the VENDOR/DEVICE/CLASS attributes
* @param device VM attribute that represents the device request
* @param vmid of the VM
*/
bool add_by_name(VectorAttribute *device, int vmid);
/**
* Allocates the given VM device using the SHORT_ADDRESS attribute
* @param device VM attribute that represents the device request
* @param vmid of the VM
*
* @return pci_id of the allocated device or -1 if not allocated
*/
bool add_by_addr(VectorAttribute *device, const std::string& addr, int vmid);
/**
* Adds PCI attributes of the selected PCI to the VM PCI device
*
* @param device VM attribute
* @param pci Host device
* @param sp if true set the "PREVIOUS_ADDRESS" attribute
*/
void pci_attribute(VectorAttribute *device, PCIDevice *pci, bool sp);
};
#endif /*HOST_SHARE_PCI_H_*/

View File

@ -121,6 +121,14 @@ public:
return name() == "NIC_ALIAS";
}
/**
* Check if a NIC is a PCI device
*/
// True when this NIC is defined through a PCI passthrough attribute
bool is_pci() const
{
return name() == "PCI";
}
/*
* Set nic NAME attribute if not empty, defaults to NAME = NIC${NIC_ID}
*/

View File

@ -680,6 +680,7 @@ INSTALL_FILES=(
VMM_EXEC_LIB_NSX_FILES:$LIB_LOCATION/ruby/nsx_driver
VMM_EXEC_LIB:$VAR_LOCATION/remotes/vmm/lib
VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm
VMM_EXEC_KVM_LIB:$VAR_LOCATION/remotes/vmm/kvm
VMM_EXEC_LXD_SCRIPTS:$VAR_LOCATION/remotes/vmm/lxd
VMM_EXEC_LXD_LIB:$VAR_LOCATION/remotes/vmm/lxd
VMM_EXEC_LXC_SCRIPTS:$VAR_LOCATION/remotes/vmm/lxc
@ -1277,6 +1278,8 @@ VMM_EXEC_KVM_SCRIPTS="src/vmm_mad/remotes/kvm/cancel \
src/vmm_mad/remotes/kvm/resize \
src/vmm_mad/remotes/kvm/resize_disk"
VMM_EXEC_KVM_LIB="src/vmm_mad/remotes/lib/kvm/opennebula_vm.rb"
#-------------------------------------------------------------------------------
# VMM configuration KVM scripts, to be installed under $REMOTES_LOCATION/etc/vmm/kvm
#-------------------------------------------------------------------------------
@ -1730,6 +1733,7 @@ NETWORK_FILES="src/vnm_mad/remotes/lib/vnm_driver.rb \
src/vnm_mad/remotes/lib/address.rb \
src/vnm_mad/remotes/lib/command.rb \
src/vnm_mad/remotes/lib/vm.rb \
src/vnm_mad/remotes/lib/vf.rb \
src/vnm_mad/remotes/lib/vlan.rb \
src/vnm_mad/remotes/lib/no_vlan.rb \
src/vnm_mad/remotes/lib/security_groups.rb \

View File

@ -249,6 +249,34 @@ CommandParser::CmdParser.new(ARGV) do
:description => 'VNC client to use'
}
# CLI option to select a host PCI device by its short address
# (the SHORT_ADDRESS attribute shown in host info)
PCI = {
:name => 'pci',
:large => '--pci short_address',
:format => String,
:description => 'Select PCI device by its short address'
}
# CLI option to select a PCI device by its device ID
PCI_DEVICE = {
:name => 'pci_device',
:large => '--pci_device device ID',
:format => String,
:description => 'Select PCI device by its device ID'
}
# CLI option to select a PCI device by its vendor ID
PCI_VENDOR = {
:name => 'pci_vendor',
:large => '--pci_vendor vendor ID',
:format => String,
:description => 'Select PCI device by its vendor ID'
}
# CLI option to select a PCI device by its class ID
PCI_CLASS = {
:name => 'pci_class',
:large => '--pci_class class ID',
:format => String,
:description => 'Select PCI device by its class ID'
}
OpenNebulaHelper::TEMPLATE_OPTIONS_VM.delete_if do |v|
%w[as_gid as_uid].include?(v[:name])
end
@ -853,8 +881,11 @@ CommandParser::CmdParser.new(ARGV) do
end
nic_attach_desc = <<-EOT.unindent
Attaches a NIC to a running VM. When using --file add only one
NIC instance.
Attaches a NIC to a VM. When using --file add only one NIC instance.
To hotplug a PCI device and use it as a NIC interface in the VM select
it with --pci (short_address) or --pci_device (device ID),
--pci_class (class ID) and/or --pci_vendor (vendor ID).
To attach a nic alias, use --file or --alias option.
@ -866,7 +897,11 @@ CommandParser::CmdParser.new(ARGV) do
OneVMHelper::NETWORK,
OneVMHelper::IP,
OneVMHelper::ALIAS,
OneVMHelper::NIC_NAME] do
OneVMHelper::NIC_NAME,
PCI,
PCI_CLASS,
PCI_VENDOR,
PCI_DEVICE] do
if options[:file].nil? && options[:network].nil?
STDERR.puts 'Provide a template file or a network:'
STDERR.puts "\t--file <file>"
@ -878,36 +913,36 @@ CommandParser::CmdParser.new(ARGV) do
template = File.read(options[:file])
else
network_id = options[:network]
ip = options[:ip]
nic_alias = options[:alias]
nic_name = options[:nic_name]
ip = options[:ip]
nic_alias = options[:alias]
nic_name = options[:nic_name]
if ip
if !nic_alias && !nic_name
template = "NIC = [ NETWORK_ID = #{network_id}, \
IP = #{ip} ]"
elsif !nic_alias && nic_name
template = "NIC = [ NETWORK_ID = #{network_id},
IP = #{ip},
NAME = #{nic_name} ]"
else
template = "NIC_ALIAS = \
[ NETWORK_ID = #{network_id},\
IP = #{ip},\
PARENT = #{nic_alias} ]"
end
is_pci = [:pci, :pci_device, :pci_vendor, :pci_class].any? { |o|
!options[o].nil?
}
if is_pci
pcia = options[:pci]
pcid = options[:pci_device]
pcic = options[:pci_class]
pciv = options[:pci_vendor]
template = "PCI = [ TYPE = NIC"
template << ", NETWORK_ID = #{network_id}"
template << ", SHORT_ADDRESS = \"#{pcia}\"" if pcia
template << ", DEVICE = \"#{pcid}\"" if pcid
template << ", CLASS = \"#{pcic}\"" if pcic
template << ", VENDOR = \"#{pciv}\"" if pciv
elsif nic_alias
template = "NIC_ALIAS = [ PARENT = #{nic_alias}"
template << ", NETWORK_ID = #{network_id}"
else
if !nic_alias && !nic_name
template = "NIC = [ NETWORK_ID = #{network_id} ]"
elsif !nic_alias && nic_name
template = "NIC = [ NETWORK_ID = #{network_id},
NAME = #{nic_name} ]"
else
template = "NIC_ALIAS = \
[ NETWORK_ID = #{network_id},\
PARENT = #{nic_alias} ]"
end
template = "NIC = [ NETWORK_ID = #{network_id}"
end
template << ", IP = #{ip}" if ip
template << ", NAME = #{nic_name}" if nic_name
template << "]"
end
helper.perform_action(args[0], options, 'Attaching NIC') do |vm|

View File

@ -326,22 +326,18 @@ bool HostShare::test_compute(int cpu, long long mem, std::string &error) const
bool HostShare::test_pci(vector<VectorAttribute *>& pci_devs, string& error) const
{
bool fits = pci.test(pci_devs);
error = "Unavailable PCI device.";
return fits;
return pci.test(pci_devs);
}
/* -------------------------------------------------------------------------- */
bool HostShare::test_numa(HostShareCapacity &sr, string& error) const
{
bool fits = numa.test(sr);
error = "Cannot allocate NUMA topology";
return fits;
return numa.test(sr);
}
/* -------------------------------------------------------------------------- */

View File

@ -20,6 +20,7 @@
#include <sstream>
#include <stdexcept>
#include <iomanip>
#include <set>
#include <math.h>
@ -75,47 +76,106 @@ void HostSharePCI::init()
}
}
/* ------------------------------------------------------------------------*/
/* ------------------------------------------------------------------------*/
/* -------------------------------------------------------------------------- */
/* Function to test PCI availability at the host */
/* -------------------------------------------------------------------------- */
bool HostSharePCI::test(const vector<VectorAttribute *> &devs) const
bool HostSharePCI::test_by_addr(const VectorAttribute *dev, const string& short_addr) const
{
std::set<string> assigned;
unsigned int vendor_id, device_id, class_id;
int vendor_rc, device_rc, class_rc;
bool found;
for (auto device : devs)
for (auto jt = pci_devices.begin(); jt != pci_devices.end(); jt++)
{
vendor_rc = get_pci_value("VENDOR", device, vendor_id);
device_rc = get_pci_value("DEVICE", device, device_id);
class_rc = get_pci_value("CLASS" , device, class_id);
PCIDevice * pci = jt->second;
if (vendor_rc <= 0 && device_rc <= 0 && class_rc <= 0)
if (pci->attrs->vector_value("SHORT_ADDRESS") != short_addr)
{
continue;
}
if (pci->vmid != -1)
{
return false;
}
found = false;
for (auto jt = pci_devices.begin(); jt != pci_devices.end(); jt++)
return true;
}
return false;
}
/* -------------------------------------------------------------------------- */
bool HostSharePCI::test_by_name(const VectorAttribute *device, std::set<string>& assigned) const
{
unsigned int vendor_id, device_id, class_id;
int vendor_rc = get_pci_value("VENDOR", device, vendor_id);
int device_rc = get_pci_value("DEVICE", device, device_id);
int class_rc = get_pci_value("CLASS" , device, class_id);
if (vendor_rc <= 0 && device_rc <= 0 && class_rc <= 0)
{
return false;
}
for (auto jt = pci_devices.begin(); jt != pci_devices.end(); jt++)
{
PCIDevice * pci = jt->second;
string short_addr = pci->attrs->vector_value("SHORT_ADDRESS");
if ((class_rc == 0 || pci->class_id == class_id) &&
(vendor_rc == 0 || pci->vendor_id == vendor_id) &&
(device_rc == 0 || pci->device_id == device_id) &&
pci->vmid == -1 &&
assigned.find(short_addr) == assigned.end())
{
PCIDevice * dev = jt->second;
assigned.insert(short_addr);
if ((class_rc == 0 || dev->class_id == class_id) &&
(vendor_rc == 0 || dev->vendor_id == vendor_id) &&
(device_rc == 0 || dev->device_id == device_id) &&
dev->vmid == -1 &&
assigned.find(dev->address) == assigned.end())
{
assigned.insert(dev->address);
found = true;
return true;
}
}
break;
}
return false;
}
/* -------------------------------------------------------------------------- */
bool HostSharePCI::test(const vector<VectorAttribute *> &devs) const
{
std::set<string> assigned;
std::set<const VectorAttribute *> tested;
// Test for "SHORT_ADDRESS" PCI selection
// and pre-allocate these first
for (const auto& device : devs)
{
string short_addr = device->vector_value("SHORT_ADDRESS");
if (short_addr.empty())
{
continue;
}
if (!found)
if (!test_by_addr(device, short_addr))
{
return false;
}
tested.insert(device);
assigned.insert(short_addr);
}
// Test for "VENDOR/DEVICE/CLASS" PCI selection
// use any remaining free device
for (const auto& device : devs)
{
if (tested.find(device) != tested.end())
{
continue;
}
if (!test_by_name(device, assigned))
{
return false;
}
@ -124,65 +184,155 @@ bool HostSharePCI::test(const vector<VectorAttribute *> &devs) const
return true;
}
/* ------------------------------------------------------------------------*/
/* ------------------------------------------------------------------------*/
void HostSharePCI::add(vector<VectorAttribute *> &devs, int vmid)
/* -------------------------------------------------------------------------- */
/* Function to assign host PCI devices to a VM */
/* -------------------------------------------------------------------------- */
void HostSharePCI::pci_attribute(VectorAttribute *device, PCIDevice *pci,
bool set_prev)
{
unsigned int vendor_id, device_id, class_id;
string address, uuid;
int vendor_rc, device_rc, class_rc, addr_rc;
static vector<string> cp_attr = {"DOMAIN", "BUS", "SLOT", "FUNCTION",
"ADDRESS", "SHORT_ADDRESS"};
for (auto device : devs)
static vector<string> cp_check_attr = {"NUMA_NODE", "UUID"};
//Save previous address for migrations, clear on revert - failed migration
if (set_prev)
{
vendor_rc = get_pci_value("VENDOR", device, vendor_id);
device_rc = get_pci_value("DEVICE", device, device_id);
class_rc = get_pci_value("CLASS" , device, class_id);
string address = device->vector_value("ADDRESS");
addr_rc = device->vector_value("ADDRESS", address);
for (auto jt = pci_devices.begin(); jt != pci_devices.end(); jt++)
if (!address.empty())
{
PCIDevice * dev = jt->second;
if ((class_rc == 0 || dev->class_id == class_id) &&
(vendor_rc == 0 || dev->vendor_id == vendor_id) &&
(device_rc == 0 || dev->device_id == device_id) &&
dev->vmid == -1 )
{
int node = -1;
dev->vmid = vmid;
dev->attrs->replace("VMID", vmid);
device->replace("DOMAIN", dev->attrs->vector_value("DOMAIN"));
device->replace("BUS", dev->attrs->vector_value("BUS"));
device->replace("SLOT", dev->attrs->vector_value("SLOT"));
device->replace("FUNCTION",dev->attrs->vector_value("FUNCTION"));
device->replace("ADDRESS", dev->attrs->vector_value("ADDRESS"));
if (addr_rc != -1 && !address.empty())
{
device->replace("PREV_ADDRESS", address);
}
if (dev->attrs->vector_value("NUMA_NODE", node)==0 && node !=-1)
{
device->replace("NUMA_NODE", node);
}
uuid = dev->attrs->vector_value("UUID");
if ( !uuid.empty() )
{
device->replace("UUID", uuid);
}
break;
}
device->replace("PREV_ADDRESS", address);
}
}
else
{
device->remove("PREV_ADDRESS");
}
//Set PCI device attributes
for (const auto& attr : cp_attr)
{
device->replace(attr, pci->attrs->vector_value(attr));
}
//Set Optional PCI attributes
for (const auto& attr : cp_check_attr)
{
string vvalue = pci->attrs->vector_value(attr);
if (!vvalue.empty())
{
device->replace(attr, vvalue);
}
}
}
/* -------------------------------------------------------------------------- */
bool HostSharePCI::add_by_addr(VectorAttribute *device, const string& short_addr,
int vmid)
{
for (auto jt = pci_devices.begin(); jt != pci_devices.end(); jt++)
{
PCIDevice * pci = jt->second;
if (pci->attrs->vector_value("SHORT_ADDRESS") != short_addr)
{
continue;
}
if ( pci->vmid != -1 )
{
return false;
}
pci->vmid = vmid;
pci->attrs->replace("VMID", vmid);
pci_attribute(device, pci, true);
return true;
}
return false;
}
/* -------------------------------------------------------------------------- */
bool HostSharePCI::add_by_name(VectorAttribute *device, int vmid)
{
unsigned int vendor_id, device_id, class_id;
int vendor_rc = get_pci_value("VENDOR", device, vendor_id);
int device_rc = get_pci_value("DEVICE", device, device_id);
int class_rc = get_pci_value("CLASS" , device, class_id);
if (vendor_rc <= 0 && device_rc <= 0 && class_rc <= 0)
{
return false;
}
for (auto jt = pci_devices.begin(); jt != pci_devices.end(); jt++)
{
PCIDevice * pci = jt->second;
if ((class_rc == 0 || pci->class_id == class_id) &&
(vendor_rc == 0 || pci->vendor_id == vendor_id) &&
(device_rc == 0 || pci->device_id == device_id) &&
pci->vmid == -1 )
{
pci->vmid = vmid;
pci->attrs->replace("VMID", vmid);
pci_attribute(device, pci, true);
return true;
}
}
return false;
}
/* -------------------------------------------------------------------------- */
bool HostSharePCI::add(vector<VectorAttribute *> &devs, int vmid)
{
std::set<VectorAttribute *> added;
for (auto& device : devs)
{
string short_addr = device->vector_value("SHORT_ADDRESS");
if (short_addr.empty())
{
continue;
}
if (!add_by_addr(device, short_addr, vmid))
{
return false;
}
added.insert(device);
}
for (auto& device : devs)
{
if (added.find(device) != added.end())
{
continue;
}
if (!add_by_name(device, vmid))
{
return false;
}
}
return true;
}
/* ------------------------------------------------------------------------*/
@ -228,30 +378,19 @@ void HostSharePCI::revert(vector<VectorAttribute *> &devs)
{
device->vector_value("PREV_ADDRESS", address);
if (!address.empty())
if (address.empty())
{
auto dev = pci_devices[address];
if (!dev)
{
continue;
}
device->replace("DOMAIN", dev->attrs->vector_value("DOMAIN"));
device->replace("BUS", dev->attrs->vector_value("BUS"));
device->replace("SLOT", dev->attrs->vector_value("SLOT"));
device->replace("FUNCTION",dev->attrs->vector_value("FUNCTION"));
device->replace("ADDRESS", address);
device->remove("PREV_ADDRESS");
int node = -1;
if (dev->attrs->vector_value("NUMA_NODE", node)==0 && node !=-1)
{
device->replace("NUMA_NODE", node);
}
break;
continue;
}
auto pci = pci_devices[address];
if (!pci)
{
continue;
}
pci_attribute(device, pci, false);
}
}
@ -366,13 +505,25 @@ int HostSharePCI::get_pci_value(const char * name,
/* ------------------------------------------------------------------------*/
int HostSharePCI::set_pci_address(VectorAttribute * pci_device,
const string& dbus)
const string& dbus, bool clean)
{
string bus;
ostringstream oss;
unsigned int ibus, slot;
// ------------------- Remove well-known attributes -----------------------
static vector<string> rm_attr = {"DOMAIN", "BUS", "SLOT", "FUNCTION",
"ADDRESS", "PREV_ADDRESS", "NUMA_NODE", "UUID"};
if (clean)
{
for (const auto& attr : rm_attr)
{
pci_device->remove(attr);
}
}
// ------------------- DOMAIN & FUNCTION -------------------------
pci_device->replace("VM_DOMAIN", "0x0000");
pci_device->replace("VM_FUNCTION", "0");

View File

@ -2534,12 +2534,14 @@ Request::ErrorCode VirtualMachineAttachNic::request_execute(int id,
{
Nebula& nd = Nebula::instance();
DispatchManager * dm = nd.get_dm();
HostPool * hpool = nd.get_hpool();
DispatchManager* dm = nd.get_dm();
VirtualMachinePool* vmpool = nd.get_vmpool();
PoolObjectAuth vm_perms;
PoolObjectAuth vm_perms;
int rc;
int hid = -1;
// -------------------------------------------------------------------------
// Authorize the operation, restricted attributes & check quotas
@ -2547,6 +2549,11 @@ Request::ErrorCode VirtualMachineAttachNic::request_execute(int id,
if (auto vm = vmpool->get_ro(id))
{
vm->get_permissions(vm_perms);
if (vm->hasHistory())
{
hid = vm->get_hid();
}
}
else
{
@ -2586,14 +2593,67 @@ Request::ErrorCode VirtualMachineAttachNic::request_execute(int id,
return AUTHORIZATION;
}
// -------------------------------------------------------------------------
// PCI test and set
// -------------------------------------------------------------------------
VectorAttribute * pci = tmpl.get("PCI");
HostShareCapacity sr;
if ( pci != nullptr && hid != -1 )
{
if ( pci->vector_value("TYPE") != "NIC" )
{
att.resp_msg = "PCI device is not of type NIC";
quota_rollback(&tmpl, Quotas::NETWORK, att_quota);
return ACTION;
}
sr.vmid = id;
sr.pci.push_back(pci);
auto host = hpool->get(hid);
if ( host == nullptr )
{
att.resp_id = id;
att.resp_obj = PoolObjectSQL::HOST;
quota_rollback(&tmpl, Quotas::NETWORK, att_quota);
return NO_EXISTS;
}
if (!host->add_pci(sr))
{
att.resp_msg = "Cannot assign PCI device in host. Check address "
"and free devices";
quota_rollback(&tmpl, Quotas::NETWORK, att_quota);
return ACTION;
}
hpool->update(host.get());
}
// -------------------------------------------------------------------------
// Perform the attach
// -------------------------------------------------------------------------
rc = dm->attach_nic(id, &tmpl, att, att.resp_msg);
int rc = dm->attach_nic(id, &tmpl, att, att.resp_msg);
if ( rc != 0 )
{
quota_rollback(&tmpl, Quotas::NETWORK, att_quota);
if ( pci != nullptr && hid != -1 )
{
if (auto host = hpool->get(hid))
{
host->del_pci(sr);
hpool->update(host.get());
}
}
return ACTION;
}
@ -2606,8 +2666,8 @@ Request::ErrorCode VirtualMachineAttachNic::request_execute(int id,
void VirtualMachineDetachNic::request_execute(
xmlrpc_c::paramList const& paramList, RequestAttributes& att)
{
int id = xmlrpc_c::value_int(paramList.getInt(1));
int nic_id = xmlrpc_c::value_int(paramList.getInt(2));
int id = xmlrpc_c::value_int(paramList.getInt(1));
int nic_id = xmlrpc_c::value_int(paramList.getInt(2));
// -------------------------------------------------------------------------
// Check if the VM is a Virtual Router

View File

@ -3375,23 +3375,74 @@ int VirtualMachine::get_network_leases(string& estr)
int VirtualMachine::set_up_attach_nic(VirtualMachineTemplate * tmpl, string& err)
{
bool is_pci = false;
// -------------------------------------------------------------------------
// Get the new NIC attribute from the template
// -------------------------------------------------------------------------
VectorAttribute * new_nic = tmpl->get("NIC");
if ( new_nic == 0 )
if (new_nic == nullptr)
{
new_nic = tmpl->get("NIC_ALIAS");
if ( new_nic == 0 )
if (new_nic == nullptr)
{
err = "Wrong format or missing NIC/NIC_ALIAS attribute";
return -1;
new_nic = tmpl->get("PCI");
if ( new_nic != nullptr && new_nic->vector_value("TYPE") != "NIC" )
{
new_nic = nullptr;
}
is_pci = true;
}
}
new_nic = new_nic->clone();
if ( new_nic == nullptr )
{
err = "Wrong format or missing NIC/NIC_ALIAS/PCI attribute";
return -1;
}
// -------------------------------------------------------------------------
// Setup PCI attribute
// -------------------------------------------------------------------------
std::unique_ptr<VectorAttribute> _new_nic(new_nic->clone());
if ( is_pci )
{
Nebula& nd = Nebula::instance();
string default_bus;
std::vector<const VectorAttribute*> pcis;
int max_pci_id = -1;
obj_template->get("PCI", pcis);
for (const auto& pci: pcis)
{
int pci_id;
pci->vector_value("PCI_ID", pci_id, -1);
if (pci_id > max_pci_id)
{
max_pci_id = pci_id;
}
}
_new_nic->replace("PCI_ID", max_pci_id + 1);
nd.get_configuration_attribute("PCI_PASSTHROUGH_BUS", default_bus);
if ( HostSharePCI::set_pci_address(_new_nic.get(), default_bus, false) != 0 )
{
err = "Wrong BUS in PCI attribute";
return -1;
}
}
// -------------------------------------------------------------------------
// Setup nic for attachment
@ -3400,12 +3451,11 @@ int VirtualMachine::set_up_attach_nic(VirtualMachineTemplate * tmpl, string& err
VectorAttribute * nic_default = obj_template->get("NIC_DEFAULT");
int rc = nics.set_up_attach_nic(oid, uid, get_cid(), new_nic, nic_default,
sgs, err);
int rc = nics.set_up_attach_nic(oid, uid, get_cid(), _new_nic.get(),
nic_default, sgs, err);
if ( rc != 0 )
{
delete new_nic;
return -1;
}
@ -3414,7 +3464,7 @@ int VirtualMachine::set_up_attach_nic(VirtualMachineTemplate * tmpl, string& err
// -------------------------------------------------------------------------
set_vm_info();
obj_template->set(new_nic);
obj_template->set(_new_nic.release());
for (auto vattr : sgs)
{

View File

@ -316,22 +316,19 @@ int VirtualMachine::parse_vrouter(string& error_str, Template * tmpl)
/* -------------------------------------------------------------------------- */
static int check_pci_attributes(VectorAttribute * pci, const string& default_bus,
string& error_str)
string& error_str)
{
static string attrs[] = {"VENDOR", "DEVICE", "CLASS"};
static int num_attrs = 3;
string bus;
static std::vector<std::string> attrs = {"VENDOR", "DEVICE", "CLASS"};
bool found = false;
for (int i = 0; i < num_attrs; i++)
for (const auto& attr: attrs)
{
unsigned int val;
int rc = HostSharePCI::get_pci_value(attrs[i].c_str(), pci, val);
int rc = HostSharePCI::get_pci_value(attr.c_str(), pci, val);
if (rc == -1)
{
error_str = "Wrong Hex value for PCI attribute " + attrs[i];
error_str = "Wrong Hex value for PCI attribute " + attr;
return -1;
}
else if ( rc != 0 )
@ -340,13 +337,22 @@ static int check_pci_attributes(VectorAttribute * pci, const string& default_bus
}
}
if (!found)
string saddr;
pci->vector_value("SHORT_ADDRESS", saddr);
if (saddr.empty() && !found)
{
error_str = "DEVICE, VENDOR or CLASS must be defined for PCI.";
error_str = "SHORT_ADDRESS, DEVICE, VENDOR or CLASS must be defined for PCI.";
return -1;
}
else if (!saddr.empty() && found)
{
error_str = "SHORT_ADDRESS cannot be set with DEVICE, VENDOR or CLASS";
return -1;
}
if ( HostSharePCI::set_pci_address(pci, default_bus) != 0 )
if ( HostSharePCI::set_pci_address(pci, default_bus, true) != 0 )
{
error_str = "Wrong BUS in PCI attribute";
return -1;
@ -375,7 +381,7 @@ int VirtualMachine::parse_pci(string& error_str, Template * tmpl)
nd.get_configuration_attribute("PCI_PASSTHROUGH_BUS", default_bus);
for (auto attr : array_pci)
for (auto& attr : array_pci)
{
if ( check_pci_attributes(attr, default_bus, error_str) != 0 )
{

View File

@ -21,6 +21,7 @@
#include "HookStateVM.h"
#include "HookManager.h"
#include "ImageManager.h"
#include "HostPool.h"
#include <sstream>
@ -1061,19 +1062,13 @@ void VirtualMachinePool::delete_attach_disk(std::unique_ptr<VirtualMachine> vm)
void VirtualMachinePool::delete_attach_nic(std::unique_ptr<VirtualMachine> vm)
{
VirtualMachineNic * nic, * p_nic;
int uid;
int gid;
int oid;
set<int> pre, post;
Template tmpl;
vm->get_security_groups(pre);
nic = vm->delete_attach_nic();
VirtualMachineNic * nic = vm->delete_attach_nic();
if ( nic == nullptr )
{
@ -1082,7 +1077,10 @@ void VirtualMachinePool::delete_attach_nic(std::unique_ptr<VirtualMachine> vm)
return;
}
int nic_id = nic->get_nic_id();
int hid = vm->get_hid();
int vmid = vm->get_oid();
int nic_id = nic->get_nic_id();
if (!nic->is_alias())
{
@ -1097,7 +1095,7 @@ void VirtualMachinePool::delete_attach_nic(std::unique_ptr<VirtualMachine> vm)
vm->clear_nic_alias_context(parent_id, alias_id);
p_nic = vm->get_nic(parent_id);
VirtualMachineNic * p_nic = vm->get_nic(parent_id);
// As NIC is an alias, parent ALIAS_IDS array should be updated
// to remove the alias_id
@ -1110,9 +1108,9 @@ void VirtualMachinePool::delete_attach_nic(std::unique_ptr<VirtualMachine> vm)
p_nic->replace("ALIAS_IDS", one_util::join(p_a_ids, ','));
}
uid = vm->get_uid();
gid = vm->get_gid();
oid = vm->get_oid();
int uid = vm->get_uid();
int gid = vm->get_gid();
int oid = vm->get_oid();
vm->get_security_groups(post);
@ -1151,6 +1149,24 @@ void VirtualMachinePool::delete_attach_nic(std::unique_ptr<VirtualMachine> vm)
vm.reset();
//Check if PCI and delete capacity from host
if (nic->is_pci() && hid != -1)
{
HostPool * hpool = Nebula::instance().get_hpool();
HostShareCapacity sr;
sr.vmid = vmid;
sr.pci.push_back(nic->vector_attribute());
if (auto host = hpool->get(hid))
{
host->del_pci(sr);
hpool->update(host.get());
}
}
//Adjust quotas
tmpl.set(nic->vector_attribute());
Quotas::quota_del(Quotas::NETWORK, uid, gid, &tmpl);

View File

@ -211,7 +211,7 @@ class VmmAction
end
end
%w[NIC NIC_ALIAS].each do |r|
%w[NIC NIC_ALIAS PCI].each do |r|
vm_template_xml.elements.each("TEMPLATE/#{r}") do |element|
vn_mad = element.get_text('VN_MAD').to_s
@ -979,6 +979,8 @@ class ExecDriver < VirtualMachineDriver
if xml_data.elements["VM/TEMPLATE/NIC[ATTACH='YES']"]
base_tmpl = "VM/TEMPLATE/NIC[ATTACH='YES']"
elsif xml_data.elements["VM/TEMPLATE/PCI[ATTACH='YES']"]
base_tmpl = "VM/TEMPLATE/PCI[ATTACH='YES']"
else
base_tmpl = "VM/TEMPLATE/NIC_ALIAS[ATTACH='YES']"
nic_alias = true
@ -1104,6 +1106,8 @@ class ExecDriver < VirtualMachineDriver
if xml_data.elements["VM/TEMPLATE/NIC[ATTACH='YES']"]
base_tmpl = "VM/TEMPLATE/NIC[ATTACH='YES']"
elsif xml_data.elements["VM/TEMPLATE/PCI[ATTACH='YES']"]
base_tmpl = "VM/TEMPLATE/PCI[ATTACH='YES']"
else
base_tmpl = "VM/TEMPLATE/NIC_ALIAS[ATTACH='YES']"
nic_alias = true

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
@ -15,93 +15,75 @@
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
source $(dirname $0)/../../etc/vmm/kvm/kvmrc
source $(dirname $0)/../../scripts_common.sh
DOMAIN=$1
get_nic_information "ATTACH='YES'"
# defaults
MODEL=${MODEL:-${DEFAULT_ATTACH_NIC_MODEL}}
FILTER=${FILTER:-${DEFAULT_ATTACH_NIC_FILTER}}
# interface XML
DEVICE=''
if [ -z "${BRIDGE}" ]; then
DEVICE+="<interface type='ethernet'>"
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
else
DEVICE+="<interface type='bridge'>"
if [[ "${BRIDGE_TYPE}" = openvswitch* ]]; then
DEVICE+=" <virtualport type='openvswitch'/>"
fi
DEVICE+=" <source bridge='$(xml_esc "${BRIDGE}")'/>"
fi
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
end
[ -n "${MAC}" ] && DEVICE+=" <mac address='$(xml_esc "${MAC}")'/>"
[ -n "${NIC_TARGET}" ] && DEVICE+=" <target dev='$(xml_esc "${NIC_TARGET}")'/>"
[ -n "${ORDER}" ] && DEVICE+=" <boot order='$(xml_esc "${ORDER}")'/>"
[ -n "${SCRIPT}" ] && DEVICE+=" <script path='$(xml_esc "${SCRIPT}")'/>"
[ -n "${MODEL}" ] && DEVICE+=" <model type='$(xml_esc "${MODEL}")'/>"
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
if [ -n "${VIRTIO_QUEUES}" ] && [ "${MODEL}" = "virtio" ]; then
DEVICE+=" <driver name='vhost' queues='$(xml_esc "${VIRTIO_QUEUES}")'/>"
fi
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
if [ -n "${IP}" ] && [ -n "${FILTER}" ]; then
DEVICE+=" <filterref filter='$(xml_esc "${FILTER}")'>"
DEVICE+=" <parameter name='IP' value='$(xml_esc "${IP}")'/>"
[ -n "${VROUTER_IP}" ] && DEVICE+=" <parameter name='IP' value='$(xml_esc "${VROUTER_IP}")'/>"
DEVICE+=" </filterref>"
fi
$LOAD_PATH << RUBY_LIB_LOCATION
if [ -n "${INBOUND_AVG_BW}${INBOUND_PEAK_BW}${INBOUND_PEAK_KB}" ] || \
[ -n "${OUTBOUND_AVG_BW}${OUTBOUND_PEAK_BW}${OUTBOUND_PEAK_KB}" ];
then
DEVICE+=" <bandwidth>"
require_relative './opennebula_vm'
require_relative '../lib/command'
if [ -n "${INBOUND_AVG_BW}${INBOUND_PEAK_BW}${INBOUND_PEAK_KB}" ]; then
DEVICE+=" <inbound"
include VirtualMachineManagerKVM
[ -n "${INBOUND_AVG_BW}" ] && \
DEVICE+=" average='$(xml_esc "${INBOUND_AVG_BW}")'"
[ -n "${INBOUND_PEAK_BW}" ] && \
DEVICE+=" peak='$(xml_esc "${INBOUND_PEAK_BW}")'"
[ -n "${INBOUND_PEAK_KB}" ] && \
DEVICE+=" burst='$(xml_esc "${INBOUND_PEAK_KB}")'"
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
#load_env("#{__dir__}/../../etc/vmm/kvm/kvmrc")
load_local_env
DEVICE+=" />"
fi
domain = ARGV[0]
if [ -n "${OUTBOUND_AVG_BW}${OUTBOUND_PEAK_BW}${OUTBOUND_PEAK_KB}" ]; then
DEVICE+=" <outbound"
vm = KvmVM.new(STDIN.read)
[ -n "${OUTBOUND_AVG_BW}" ] && \
DEVICE+=" average='$(xml_esc "${OUTBOUND_AVG_BW}")'"
[ -n "${OUTBOUND_PEAK_BW}" ] && \
DEVICE+=" peak='$(xml_esc "${OUTBOUND_PEAK_BW}")'"
[ -n "${OUTBOUND_PEAK_KB}" ] && \
DEVICE+=" burst='$(xml_esc "${OUTBOUND_PEAK_KB}")'"
if vm.pci_attach?
dev_xml = vm.hostdev_xml
else
dev_xml = vm.interface_xml
end
DEVICE+=" />"
fi
DEVICE+=" </bandwidth>"
fi
DEVICE+=" </interface>"
# device attach
ATTACH_CMD=$(cat <<EOF
virsh --connect $LIBVIRT_URI attach-device $DOMAIN <(
script =<<~EOS
#{virsh} attach-device #{domain} <(
cat <<EOT
$DEVICE
#{dev_xml}
EOT
)
EOF
)
EOS
multiline_exec_and_log "$ATTACH_CMD" \
"Could not attach NIC ${NIC_ID} ($MAC) to $DOMAIN"
rc, _out, err = Command.execute("bash -s", false, 0, :stdin_data => script)
if rc != 0
STDERR.puts "Could not attach NIC to #{domain}: #{err}"
exit(1)
end
exit 0

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
@ -16,31 +16,112 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
source $(dirname $0)/../../etc/vmm/kvm/kvmrc
source $(dirname $0)/../../scripts_common.sh
ONE_LOCATION = ENV['ONE_LOCATION']
DOMAIN=$1
MAC=$2
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
VMDIR = '/var/lib/one'
CONFIG_FILE = '/var/lib/one/config'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
VMDIR = ONE_LOCATION + '/var'
CONFIG_FILE = ONE_LOCATION + '/var/config'
end
DETACH_ARGS="--domain $DOMAIN --type bridge --mac $MAC"
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
function is_attached
{
virsh --connect $LIBVIRT_URI domiflist $DOMAIN | grep $MAC > /dev/null 2>&1
}
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
function detach_nic
{
exec_and_log "virsh --connect $LIBVIRT_URI detach-interface $DETACH_ARGS" \
"Could not detach NIC ($MAC) from $DOMAIN" || return 1
$LOAD_PATH << RUBY_LIB_LOCATION
if is_attached; then
return 1
else
return 0
fi
}
require_relative './opennebula_vm'
require_relative '../lib/command'
if ! retry ${VIRSH_RETRIES:-3} detach_nic; then
exit 1
fi
include VirtualMachineManagerKVM
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Checks whether an interface with the given MAC address is currently
# attached to the libvirt domain (grep over virsh domiflist output).
def attached?(dom, mac)
    probe = "#{virsh} domiflist #{dom} | grep #{mac} > /dev/null 2>&1"

    status, _stdout, _stderr = Command.execute("bash -s", false, 0,
                                               :stdin_data => probe)

    status.zero?
end
# Issues a single virsh detach-interface for the bridge NIC identified
# by its MAC address. Logs to stderr on failure.
def detach_interface(dom, mac)
    cmdline = "#{virsh} detach-interface --domain #{dom} --type bridge --mac #{mac}"

    status, _out, errmsg = Command.execute("bash -s", false, 0,
                                           :stdin_data => cmdline)

    unless status.zero?
        STDERR.puts "Error detaching interface (#{mac}): #{errmsg}"
    end

    status.zero?
end
# Retries the interface detach until the NIC is no longer attached to the
# domain or the maximum number of tries is exhausted.
#
# @param dom [String] libvirt domain name
# @param mac [String] MAC address of the interface to detach
# @return [Integer] 0 on success, 1 if the NIC is still attached
def detach_nic(dom, mac)
    # VIRSH_RETRIES may be unset or non-numeric; clamp to a minimum of 3
    # tries. Previously a value like "abc" or "0" yielded 0.times, which
    # skipped the detach entirely and returned 0 (a false success).
    tries = ENV['VIRSH_RETRIES'].to_i
    tries = 3 if tries <= 0

    tries.times do
        detach_interface(dom, mac)

        return 0 unless attached?(dom, mac)
    end

    1
end
# Detaches a PCI device from the domain using a <hostdev> specification.
#
# @param dom [String] libvirt domain name
# @param vm [KvmVM] VM wrapper used to render the hostdev XML
# @return [Integer] virsh exit code (0 on success)
def detach_pci(dom, vm)
    # Detach always uses the <hostdev> form (force_hostdev = true), see
    # the notes in KvmVM#hostdev_xml
    dev_xml = vm.hostdev_xml(true)

    cmd =<<~EOS
        #{virsh} detach-device #{dom} <(
        cat <<EOT
        #{dev_xml}
        EOT
        )
    EOS

    rc, _o, e = Command.execute("bash -s", false, 0, :stdin_data => cmd)

    # Fixed message: this is the detach path (it previously said "attach")
    STDERR.puts "Could not detach PCI device from #{dom}: #{e}" if rc != 0

    rc
end
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
#load_env("#{__dir__}/../../etc/vmm/kvm/kvmrc")
load_local_env
domain = ARGV[0]
mac = ARGV[1]
vm = KvmVM.new(STDIN.read)
if vm.pci_attach?
rc = detach_pci(domain, vm)
else
rc = detach_nic(domain, mac)
end
exit(rc)

View File

@ -22,7 +22,7 @@ require 'open3'
# locking capabilites using flock
module Command
def self.execute(cmd, block, verbose = 0)
def self.execute(cmd, block, verbose = 0, opts = {})
stdout = ''
stderr = ''
@ -31,7 +31,7 @@ module Command
STDERR.puts "Running command #{cmd}" if verbose >= 1
stdout, stderr, s = Open3.capture3(cmd)
stdout, stderr, s = Open3.capture3(cmd, opts)
ensure
unlock(fd) if block
end

View File

@ -0,0 +1,295 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'open3'

require_relative '../lib/xmlparser'
require_relative '../lib/opennebula_vm'
# rubocop:disable Style/ClassAndModuleChildren
# rubocop:disable Style/ClassVars
# This module includes related KVM/Libvirt functions
module VirtualMachineManagerKVM

    #---------------------------------------------------------------------------
    # KVM Configuration
    #---------------------------------------------------------------------------

    # Default locations for the kvmrc file on the front-end (local) or
    # hypervisor (remote)
    KVMRC_LOCAL  = '/var/lib/one/remotes/etc/vmm/kvm/kvmrc'
    KVMRC_REMOTE = '/var/tmp/one/etc/vmm/kvm/kvmrc'

    # Loads env from the default local (front-end) path
    def load_local_env
        load_env(KVMRC_LOCAL)
    end

    # Loads env from the default remote (hypervisor) path
    def load_remote_env
        load_env(KVMRC_REMOTE)
    end

    # Defines env variables for the current process by parsing a shell
    # formatted file. Lines of the form "NAME=value" or "export NAME=value"
    # are exported to ENV; comments and non-matching lines are skipped.
    #
    # Best effort: a missing or unreadable file is silently ignored so the
    # action can still run with the inherited process environment.
    #
    # @param path [String] path to the kvmrc-style file
    def load_env(path)
        File.readlines(path).each do |l|
            next if l.empty? || l[0] == '#'

            m = l.match(/(export)?[[:blank:]]*([^=]+)=([^[[:blank:]]]+)$/)

            next unless m

            ENV[m[2]] = m[3].delete("\n") if m[2] && m[3]
        end
    rescue StandardError
    end

    # @return [String] a virsh command line considering the LIBVIRT_URI env
    def virsh
        uri = ENV['LIBVIRT_URI']
        uri ||= 'qemu:///system'

        "virsh --connect #{uri}"
    end

    #---------------------------------------------------------------------------
    # OpenNebula KVM Virtual Machine
    #---------------------------------------------------------------------------
    # This class parses and wraps the information in the Driver action data.
    # It provides some helper functions to implement KVM driver actions.
    class KvmVM < OpenNebulaVM

        def initialize(xml_action)
            super(xml_action, {})

            # If set, it scopes VM element access (see xputs/exist?)
            @xpath_prefix = ''
        end

        # @return [Boolean] true if the VM includes a PCI device being attached
        def pci_attach?
            @xml.exist? "TEMPLATE/PCI[ATTACH='YES']"
        end

        #-----------------------------------------------------------------------
        # This function generates a XML document to attach a new interface
        # to the VM. The interface specification supports the same OpenNebula
        # attributes.
        #
        # Model and filter can be set in kvmrc with DEFAULT_ATTACH_NIC_MODEL
        # and DEFAULT_ATTACH_NIC_FILTER, respectively
        #
        # Example:
        #
        # <interface type='bridge'>
        #   <source bridge='onebr57'/>
        #   <mac address='02:00:c0:a8:96:01'/>
        #   <target dev='one-160-1'/>
        #   <model type='virtio'/>
        # </interface>
        #-----------------------------------------------------------------------
        def interface_xml
            prefix_old    = @xpath_prefix
            @xpath_prefix = "TEMPLATE/NIC[ATTACH='YES']/"

            model = @xml["#{@xpath_prefix}MODEL"]
            model = env('DEFAULT_ATTACH_NIC_MODEL') if model.empty?

            # BUG FIX: keep the raw value for comparison. encode(:xml => :attr)
            # wraps the value in double quotes (virtio -> "virtio"), so
            # comparing the encoded string against 'virtio' never matched and
            # VIRTIO_QUEUES was silently dropped.
            raw_model = model.dup

            model.encode!(:xml => :attr) unless model.empty?

            filter = @xml["#{@xpath_prefix}FILTER"]
            filter = env('DEFAULT_ATTACH_NIC_FILTER') if filter.empty?
            filter.encode!(:xml => :attr) unless filter.empty?

            if exist? 'BRIDGE'
                dev = '<interface type="bridge">'

                if @xml["#{@xpath_prefix}BRIDGE_TYPE"] =~ /openvswitch/
                    dev << '<virtualport type="openvswitch"/>'
                end

                dev << xputs('<source bridge=%s/>', 'BRIDGE')
            else
                dev = '<interface type="ethernet">'
            end

            dev << xputs('<mac address=%s/>', 'MAC')
            dev << xputs('<script path=%s/>', 'SCRIPT')
            dev << xputs('<target dev=%s/>', 'TARGET')
            dev << xputs('<boot order=%s/>', 'ORDER')

            dev << "<model type=#{model}/>" unless model.empty?

            if raw_model == 'virtio'
                dev << xputs('<driver name="vhost" queues=%s/>', 'VIRTIO_QUEUES')
            end

            if exist?('IP') && !filter.empty?
                dev << "<filterref filter=#{filter}>"
                dev << xputs('<parameter name="IP" value=%s/>', 'IP')
                dev << xputs('<parameter name="IP" value=%s/>', 'VROUTER_IP')
                dev << '</filterref>'
            end

            inb_keys = %w[INBOUND_AVG_BW INBOUND_PEAK_BW INBOUND_PEAK_KB]
            inbound  = inb_keys.any? {|e| exist? e }

            outb_keys = %w[OUTBOUND_AVG_BW OUTBOUND_PEAK_BW OUTBOUND_PEAK_KB]
            outbound  = outb_keys.any? {|e| exist? e }

            if inbound || outbound
                dev << '<bandwidth>'

                if inbound
                    dev << '<inbound'
                    dev << xputs(' average=%s', 'INBOUND_AVG_BW')
                    dev << xputs(' peak=%s', 'INBOUND_PEAK_BW')
                    dev << xputs(' burst=%s', 'INBOUND_PEAK_KB')
                    dev << '/>'
                end

                if outbound
                    dev << '<outbound'
                    dev << xputs(' average=%s', 'OUTBOUND_AVG_BW')
                    dev << xputs(' peak=%s', 'OUTBOUND_PEAK_BW')
                    dev << xputs(' burst=%s', 'OUTBOUND_PEAK_KB')
                    dev << '/>'
                end

                dev << '</bandwidth>'
            end

            dev << '</interface>'

            @xpath_prefix = prefix_old

            dev
        end

        # @return [Boolean] true if the device at the given short PCI address
        # is a Virtual Function (it appears as a virtfn* link of some PF)
        def vf?(short_address)
            # An empty address would match every line through the $-anchored
            # regexp below; treat it as a regular (non VF) device instead
            return false if short_address.nil? || short_address.empty?

            cmd = "find /sys/devices -type l -name 'virtfn*' -printf '%p#'"\
                  " -exec readlink -f '{}' \\;"

            out, _err, _rc = Open3.capture3(cmd)

            return false if out.nil? || out.empty?

            regexp = Regexp.new("#{short_address}$")

            !out.match(regexp).nil?
        end

        #-----------------------------------------------------------------------
        # This function generates a XML document to attach a new device
        # to the VM. The specification supports the same OpenNebula attributes.
        #
        # Example:
        #
        # <hostdev mode='subsystem' type='pci' managed='yes'>
        #   <source>
        #     <address domain='0x0000' bus='0x05' slot='0x02' function='0x0'/>
        #   </source>
        #   <address type='pci' domain='0x0' bus='0x01' slot='0x01' function='0'/>
        # </hostdev>
        #
        # NOTE: Libvirt/QEMU seems to have a race condition accessing the vfio
        # device and the permission check/set that makes <hostdev> not work
        # for VF.
        #
        # NOTE: On detach (as we are managing MAC/VLAN through ip link vf)
        # devices need to use the <hostdev> format (force_hostdev = true)
        #-----------------------------------------------------------------------
        def hostdev_xml(force_hostdev = false)
            prefix_old    = @xpath_prefix
            @xpath_prefix = "TEMPLATE/PCI[ATTACH='YES']/"

            if exist? 'UUID'
                # Mediated device (mdev), selected by its UUID
                dev = '<hostdev mode="subsystem" type="mdev" model="vfio-pci">'
                dev << xputs('<source><address uuid=%s/></source>', 'UUID')
                dev << "</hostdev>"
            else
                if force_hostdev
                    is_vf = false
                else
                    is_vf = vf?(@xml["#{@xpath_prefix}SHORT_ADDRESS"])
                end

                if is_vf
                    dev     = '<interface type="hostdev" managed="yes">'
                    dev_end = '</interface>'
                else
                    dev     = '<hostdev mode="subsystem" type="pci" managed="yes">'
                    dev_end = '</hostdev>'
                end

                dev << '<source><address'
                dev << ' type="pci"' if is_vf
                dev << xputs(' domain=%s', 'DOMAIN', :hex => true)
                dev << xputs(' bus=%s', 'BUS', :hex => true)
                dev << xputs(' slot=%s', 'SLOT', :hex => true)
                dev << xputs(' function=%s', 'FUNCTION', :hex => true)
                dev << '/></source>'

                # Setting the guest PCI address (VM_DOMAIN/VM_BUS/VM_SLOT/
                # VM_FUNCTION) is disabled: it requires checking that a PCI
                # controller exists for bus 1; libvirt picks the guest
                # address otherwise. Disabled code was:
                #
                #   vm_addr = %w[VM_DOMAIN VM_BUS VM_SLOT VM_FUNCTION].all? do |e|
                #       exist? e
                #   end
                #
                #   if vm_addr
                #       dev << '<address type="pci"'
                #       dev << xputs(' domain=%s', 'VM_DOMAIN')
                #       dev << xputs(' bus=%s', 'VM_BUS')
                #       dev << xputs(' slot=%s', 'VM_SLOT')
                #       dev << xputs(' function=%s', 'VM_FUNCTION')
                #       dev << '/>'
                #   end

                dev << dev_end
            end

            @xpath_prefix = prefix_old

            dev
        end

        private

        # @return [String] the XML rendering of a VM attribute using the
        # provided sprintf format, or '' when the attribute is not set.
        # Options
        #   :hex to prepend 0x to the attribute
        def xputs(format, name, opts = {})
            value = @xml["#{@xpath_prefix}#{name}"]

            return '' if value.empty?

            value = "0x#{value}" if opts[:hex]

            sprintf(format, value.encode(:xml => :attr))
        end

        # @return [Boolean] true if the given VM element exists (considers
        # the current xpath_prefix)
        def exist?(name)
            @xml.exist?("#{@xpath_prefix}#{name}")
        end

        # @return [String] a copy of an env variable or '' if not defined
        def env(name)
            return '' if ENV[name].nil?

            ENV[name].dup
        end

    end

end

View File

@ -76,7 +76,7 @@ class OpenNebulaVM
return if wild?
# Sets the DISK ID of the root filesystem
# Sets the DISK ID of the root filesystem (LXC & Firecraker)
disk = @xml.element('//TEMPLATE/DISK')
return unless disk

View File

@ -52,6 +52,10 @@ class XMLElement
element
end
# Returns true when an element matching the given xpath key is present
def exist?(key)
    element = @xml.elements[key.to_s]

    !element.nil?
end
# Get elements by xpath. This function returns an Array of XMLElements
def elements(key)
collection = []

View File

@ -29,6 +29,9 @@ begin
filter_driver = VNMMAD::VNMDriver.filter_driver(template64,
xpath_filter,
deploy_id)
filter_driver.activate_vf(filter_driver.vm)
filter_driver.run_hooks(ARGV, template64) if filter_driver.activate == 0
rescue Exception => e
OpenNebula.log_error(e.message)

View File

@ -24,4 +24,7 @@ require 'vnmmad'
template64 = STDIN.read
hm = VNMMAD::VNMDriver.from_base64(template64)
hm.activate_vf(hm.vm)
exit hm.run_hooks(ARGV, template64)

View File

@ -29,6 +29,8 @@ begin
filter_driver = VNMMAD::VNMDriver.filter_driver(template64,
xpath_filter,
deploy_id)
filter_driver.activate_vf(filter_driver.vm)
filter_driver.run_hooks(ARGV, template64) if filter_driver.activate == 0
rescue Exception => e
OpenNebula.log_error(e.message)

View File

@ -25,23 +25,24 @@ module VNMMAD
# to local installations. Any modification requires to sync the hosts
# with onehost sync command.
COMMANDS = {
:ebtables => 'sudo -n ebtables --concurrent',
:iptables => 'sudo -n iptables -w 3 -W 20000',
:ip6tables=> 'sudo -n ip6tables -w 3 -W 20000',
:ip => 'sudo -n ip',
:ip_unpriv=> 'ip',
:virsh => 'virsh -c qemu:///system',
:ovs_vsctl=> 'sudo -n ovs-vsctl',
:ovs_ofctl=> 'sudo -n ovs-ofctl',
:ovs_appctl=> 'sudo -n ovs-appctl',
:lsmod => 'lsmod',
:ipset => 'sudo -n ipset'
:ebtables => 'sudo -n ebtables --concurrent',
:iptables => 'sudo -n iptables -w 3 -W 20000',
:ip6tables => 'sudo -n ip6tables -w 3 -W 20000',
:ip => 'sudo -n ip',
:ip_unpriv => 'ip',
:virsh => 'virsh -c qemu:///system',
:ovs_vsctl => 'sudo -n ovs-vsctl',
:ovs_ofctl => 'sudo -n ovs-ofctl',
:ovs_appctl => 'sudo -n ovs-appctl',
:lsmod => 'lsmod',
:ipset => 'sudo -n ipset'
}
# Adjust :ip[6]tables commands to work with legacy versions
begin
stdout = Open3.capture3('sudo iptables --version')[0]
regex = /.*v(?<version>\d+.\d+.\d+)/
regex = /.*v(?<version>\d+.\d+.\d+)/
iptables_version = Gem::Version.new(stdout.match(regex)[:version])
if Gem::Version.new('1.6.1') > iptables_version

View File

@ -0,0 +1,107 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# rubocop:disable Style/ClassAndModuleChildren
###########################################################################
# Module to use as mixin for configuring VFs
###########################################################################
module VNMMAD::VirtualFunction

    # This function iterates over each VF defined as a PCI device in the VM
    # and sets the MAC and VLAN through the ip link command.
    #
    # Parameters supported by ip-link:
    #   - mac LLADDRESS (*)
    #   - vlan VLANID (*)
    #   - qos VLAN-QOS
    #   - proto VLAN-PROTO 802.1Q or 802.1ad
    #   - rate TXRATE
    #   - max_tx_rate TXRATE
    #   - min_tx_rate TXRATE
    #   - spoofchk on|off
    #   - query_rss on|off
    #   - state auto|enable|disable
    #   - trust on|off
    #   - node_guid eui64 - Infiniband
    #   - port_guid eui64 - Infiniband
    #   (*) = supported by OpenNebula
    #
    # VFs are linked in the system device map. For example:
    #
    # /sys/devices/pci0000:80/0000:80:03.2/0000:85:00.0
    # ├── virtfn0 -> ../0000:85:02.0
    # ├── virtfn1 -> ../0000:85:02.1
    # ├── virtfn2 -> ../0000:85:02.2
    # ├── virtfn3 -> ../0000:85:02.3
    # ├── virtfn4 -> ../0000:85:02.4 <---- VF short PCI address
    # ├── virtfn5 -> ../0000:85:02.5
    # ├── virtfn6 -> ../0000:85:02.6
    # ├── virtfn7 -> ../0000:85:02.7
    # ├── net
    # │   └── enp133s0f0 <---- PF device
    def activate_vf(vm)
        # On NIC attach only the VF being attached (ATTACH='YES') must be
        # configured; otherwise all the VFs of the VM are activated
        is_attach = false

        vm.each_pci do |pci|
            if pci[:attach] == 'YES'
                is_attach = true
                break
            end
        end

        # Build the VF link map once; it is loop invariant (it was previously
        # recomputed for every PCI device). Each output line is:
        #   <virtfn link path>#<VF device path>
        # e.g.:
        #   /sys/devices/pci0000:80/0000:80:03.2/0000:85:00.0/virtfn3#/sys/devices/pci0000:80/0000:80:03.2/0000:85:02.3
        cmd = "find /sys/devices -type l -name 'virtfn*' -printf '%p#'"\
              " -exec readlink -f '{}' \\;"

        out, _err, _rc = Open3.capture3(cmd)

        return if out.nil? || out.empty?

        vm.each_pci do |pci|
            next if pci[:short_address].nil?
            next if is_attach && pci[:attach] != 'YES'

            # Look for the associated PF by matching the VF short address
            regexp = Regexp.new("#{pci[:short_address]}$")

            out.each_line do |line|
                next unless line.match(regexp)

                virtfn, _vf = line.split('#')

                # Matched line is in the form:
                #   virtfn /sys/devices/pci0000:80/0000:80:03.2/0000:85:00.0/virtfn3
                #   _vf    /sys/devices/pci0000:80/0000:80:03.2/0000:85:02.3
                m = virtfn.match(/virtfn([0-9]+)/)

                next if m.nil?

                # The PF network device name lives under <PF folder>/net
                cmd = "ls #{File.dirname(virtfn)}/net"

                pf_dev, _err, _rc = Open3.capture3(cmd)

                next if pf_dev.nil? || pf_dev.empty?

                pf_dev.strip!

                # Configure the VF through the PF: ip link set <PF> vf <N> ...
                cmd = "#{command(:ip)} link set #{pf_dev} vf #{m[1]}"

                cmd << " mac #{pci[:mac]}" if pci[:mac]
                cmd << " vlan #{pci[:vlan_id]}" if pci[:vlan_id]

                OpenNebula.exec_and_log(cmd)
            end
        end
    end

end
# rubocop:enable Style/ClassAndModuleChildren

View File

@ -41,6 +41,8 @@ module VNMMAD
@nics = VNMNetwork::Nics.new(hypervisor)
@nics_alias = VNMNetwork::Nics.new(hypervisor)
@pcis = VNMNetwork::Nics.new(hypervisor)
return if xpath_filter.nil?
@vm_root.elements.each(xpath_filter) do |nic_element|
@ -63,6 +65,16 @@ module VNMMAD
@nics_alias << nic
end
pci_xpath_filter = xpath_filter.gsub(/\/NIC/,'/PCI')
@vm_root.elements.each(pci_xpath_filter) do |ne|
nic = @pcis.new_nic
nic_build_hash(ne, nic)
@pcis << nic
end
end
# Iterator on each NIC of the VM
@ -83,6 +95,15 @@ module VNMMAD
end
end
# Iterator on each PCI device of the VM
def each_pci(&block)
    return if @pcis.nil?

    @pcis.each {|pci| block.call(pci) }
end
def each_nic_all(&block)
all_nics = @nics

View File

@ -17,6 +17,8 @@
require 'shellwords'
require 'open3'
require_relative 'vf'
################################################################################
# The VNMMAD module provides the basic abstraction to implement custom
# virtual network drivers. The VNMAD module includes:
@ -33,6 +35,8 @@ module VNMMAD
attr_reader :vm
include VirtualFunction
# Creates new driver using:
# @param vm_tpl [String] XML String from oned
# @param xpath_filter [String] to get relevant NICs for the driver

View File

@ -24,6 +24,7 @@ require 'yaml'
require 'command'
require 'vm'
require 'nic'
require 'vf'
require 'address'
require 'security_groups'
require 'security_groups_iptables'

View File

@ -26,4 +26,7 @@ deploy_id = ARGV[0]
xpath_filter = OpenvSwitchVLAN::XPATH_FILTER
ovs = OpenvSwitchVLAN.from_base64(template64, xpath_filter, deploy_id)
ovs.activate_vf(ovs.vm)
ovs.run_hooks(ARGV, template64) if ovs.activate == 0

View File

@ -26,4 +26,4 @@ deploy_id = ARGV[0]
xpath_filter = OpenvSwitchVLAN::XPATH_FILTER
ovs = OpenvSwitchVLAN.from_base64(template64, xpath_filter, deploy_id)
ovs.run_hooks(ARGV, template64) if ovs.activate == 0
ovs.run_hooks(ARGV, template64) if ovs.activate == 0