
B OpenNebula/one#6596: Fix Host NUMA nodes after VM migration (#3226)

* Fix Host NUMA nodes after VM migration
* Move template parsing to HostShareCapacity

+ add warning about unsafe pointer usage

(cherry picked from commit 0cde7d96a200ccfbffe7579124fc5efa07e97b0d)
Pavel Czerný 2024-09-24 09:11:35 +02:00 committed by Ruben S. Montero
parent c788e5bc5f
commit 21dbf2a81f
6 changed files with 94 additions and 23 deletions
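The warning added by this commit documents a real lifetime hazard: HostShareCapacity::set() stores raw pointers into the Template it is given (the PCI, NUMA_NODE and TOPOLOGY attributes), so the Template must outlive the capacity object. Below is a minimal sketch of the failure mode, using simplified stand-in types rather than the real OpenNebula classes:

```cpp
#include <string>
#include <vector>

// Simplified stand-ins for the real Template/HostShareCapacity,
// just to illustrate the borrowed-pointer contract.
struct VectorAttribute
{
    std::string name;
};

struct Template
{
    std::vector<VectorAttribute> attrs;

    VectorAttribute* get(const std::string& n)
    {
        for (auto& a : attrs)
        {
            if (a.name == n)
            {
                return &a; // pointer into this Template's own storage
            }
        }

        return nullptr;
    }
};

struct HostShareCapacity
{
    VectorAttribute* topology = nullptr;

    void set(Template& tmpl)
    {
        topology = tmpl.get("TOPOLOGY"); // borrows, does not copy
    }
};

int main()
{
    HostShareCapacity sr;

    {
        Template tmpl;
        tmpl.attrs.push_back({"TOPOLOGY"});

        sr.set(tmpl);
    } // tmpl destroyed here

    // sr.topology now dangles; dereferencing it is undefined behavior.
    // Hence the new doc warnings: keep the Template (or the VM object)
    // alive for the whole lifetime of the HostShareCapacity.
    return 0;
}
```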

View File

@@ -17,6 +17,7 @@
#ifndef HOST_SHARE_CAPACITY_H_
#define HOST_SHARE_CAPACITY_H_
#include "Template.h"
#include "Attribute.h"
/* ------------------------------------------------------------------------ */
@@ -53,6 +54,48 @@ struct HostShareCapacity
    VectorAttribute * topology;
    std::vector<VectorAttribute *> nodes;

    /**
     *  Get the VM capacity from the template
     *    @param vid the VM ID
     *    @param tmpl the VM template. Warning: the HostShareCapacity uses
     *    pointers into tmpl, so tmpl must exist for the lifetime of the
     *    HostShareCapacity
     */
    void set(int vid, Template& tmpl)
    {
        float fcpu;

        pci.clear();
        nodes.clear();

        vmid = vid;

        if ((tmpl.get("MEMORY", mem) == false) ||
            (tmpl.get("CPU", fcpu) == false))
        {
            cpu  = 0;
            mem  = 0;
            disk = 0;
            vcpu = 0;

            return;
        }

        cpu = (int) (fcpu * 100); //%
        mem = mem * 1024;         //KB
        disk = 0;

        tmpl.get("VCPU", vcpu);
        tmpl.get("PCI", pci);
        tmpl.get("NUMA_NODE", nodes);

        topology = tmpl.get("TOPOLOGY");

        return;
    }
};

#endif /*HOST_SHARE_CAPACITY_H_*/
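Note that set() also normalizes units: CPU comes in as a fraction of a physical core and is stored as a percentage, while MEMORY arrives in MB (the usual unit for OpenNebula templates) and is stored in KB. A small worked example with assumed input values:

```cpp
#include <cassert>

int main()
{
    // Assumed template values: CPU = 0.5, MEMORY = 2048 (MB)
    float     fcpu = 0.5f;
    long long mem  = 2048;

    int cpu = (int) (fcpu * 100); // 50 -> half a physical core, in %
    mem     = mem * 1024;         // 2097152 -> MB converted to KB

    assert(cpu == 50);
    assert(mem == 2097152);
    return 0;
}
```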

View File

@@ -1034,10 +1034,19 @@ public:
    /**
     *  Get the VM physical capacity requirements for the host.
     *    @param sr the HostShareCapacity to store the capacity request. The sr
     *    uses pointers to the VM template; do not destroy the VM object before sr.
     */
    void get_capacity(HostShareCapacity &sr) const;

    /**
     *  Get the VM physical capacity from the previous history
     *    @param sr the HostShareCapacity to store the capacity request.
     *    @param tmpl temporary object to hold the pointers; do not release
     *    tmpl before the HostShareCapacity
     */
    void get_previous_capacity(HostShareCapacity &sr, Template &tmpl) const;

    /**
     *  Adds automatic placement requirements: Datastore and Cluster
     *    @param cluster_ids set of viable clusters for this VM
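The reason tmpl is passed in by the caller rather than created inside get_previous_capacity() is that the caller's scope defines how long the borrowed pointers stay valid; the start_prolog_migrate() hunk further down follows exactly this pattern. A compressed sketch of the contract, again with stand-in types rather than the real classes:

```cpp
// Stand-in types; the real signatures live in VirtualMachine.h above.
struct Template {};

struct HostShareCapacity
{
    Template* borrowed = nullptr;
};

// tmpl is an out-parameter owned by the caller: after the call, sr holds
// pointers into it, so tmpl must not be released before sr.
void get_previous_capacity(HostShareCapacity& sr, Template& tmpl)
{
    sr.borrowed = &tmpl;
}

int main()
{
    HostShareCapacity sr;
    Template tmpl; // declared next to sr, so their lifetimes match

    get_previous_capacity(sr, tmpl);

    // ... use sr here (e.g. to subtract capacity from the previous host) ...

    return 0;
} // tmpl outlives every use of sr, satisfying the doc-comment contract
```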

View File

@@ -165,6 +165,8 @@ int DispatchManager::import(unique_ptr<VirtualMachine> vm, const RequestAttribut
    vm->set_running_stime(the_time);
    vm->set_vm_info();
    vmpool->update_history(vm.get());
    vmpool->update(vm.get());

View File

@@ -84,6 +84,8 @@ void LifeCycleManager::trigger_deploy(int vid)
    vm->set_prolog_stime(thetime);
    vm->set_vm_info();
    vmpool->update_history(vm.get());
    vmpool->update(vm.get());

@@ -276,6 +278,8 @@ void LifeCycleManager::trigger_migrate(int vid, const RequestAttributes& ra,
    vm->set_action(vm_action, uid, gid, req_id);
    vm->set_vm_info();
    vmpool->update_history(vm.get());
    vm->set_previous_action(vm_action, uid, gid, req_id);

@@ -355,6 +359,8 @@ void LifeCycleManager::trigger_migrate(int vid, const RequestAttributes& ra,
    vm->set_prolog_stime(the_time);
    vm->set_vm_info();
    vmpool->update_history(vm.get());
    vmpool->update(vm.get());

@@ -416,6 +422,8 @@ void LifeCycleManager::trigger_live_migrate(int vid, const RequestAttributes& ra
    vm->set_stime(time(0));
    vm->set_vm_info();
    vmpool->update_history(vm.get());
    vm->set_previous_action(VMActions::LIVE_MIGRATE_ACTION, uid, gid,

View File

@@ -54,10 +54,11 @@ void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm)
    vmpool->update_history(vm);

    vm->get_capacity(sr);

    if ( vm->get_hid() != vm->get_previous_hid() )
    {
        Template tmpl;

        vm->get_previous_capacity(sr, tmpl);

        hpool->del_capacity(vm->get_previous_hid(), sr);

        vm->release_previous_vnc_port();

@@ -840,6 +841,8 @@ void LifeCycleManager::trigger_prolog_failure(int vid)
    hpool->add_capacity(vm->get_hid(), sr);
    vm->set_vm_info();
    vmpool->insert_history(vm.get());
    vmpool->update(vm.get());

View File

@@ -2143,39 +2143,45 @@ void VirtualMachine::cp_previous_history()
void VirtualMachine::get_capacity(HostShareCapacity& sr) const
{
    sr.set(oid, *obj_template);
}

void VirtualMachine::get_previous_capacity(HostShareCapacity& sr, Template &tmpl) const
{
    if (!previous_history)
    {
        return;
    }

    const string& vm_info = previous_history->vm_info;

    ObjectXML xml(vm_info);

    vector<xmlNodePtr> content;

    xml.get_nodes("/VM/TEMPLATE", content);

    if (content.empty())
    {
        NebulaLog::error("ONE", "Unable to read capacity from previous history");
        return;
    }

    auto rc = tmpl.from_xml_node(content[0]);

    if (rc != 0)
    {
        NebulaLog::error("ONE", "Unable to parse capacity from previous history");
        return;
    }

    sr.set(oid, tmpl);

    ObjectXML::free_nodes(content);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */