Mirror of https://github.com/OpenNebula/one.git (synced 2025-03-21 14:50:08 +03:00)

Commit 429062ca23: Merge branch 'master' of git.opennebula.org:one
@@ -20,11 +20,6 @@
 #
 # LIVE_RESCHEDS: Perform live (1) or cold migrations (0) when rescheduling a VM
 #
-# FREE_CPU_THRESHOLD: CPU usage that is neglected for computing the available
-#                     CPU. Expressed as a fraction of the total CPU.
-#                     E.g. 0.9 means that a free CPU load greater than 90% will
-#                     result in a free CPU equal to the total CPU
-#
 # HYPERVISOR_MEM: Fraction of total MEMORY reserved for the hypervisor.
 #                 E.g. 0.1 means that only 90% of the total MEMORY will be used
 #
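In plain numbers, the HYPERVISOR_MEM comment above works out as follows; a minimal standalone sketch (the 8192 MB total is an example value, not taken from this commit):

    #include <iostream>

    int main()
    {
        const float hypervisor_mem = 0.1;  // HYPERVISOR_MEM from sched.conf
        const int   max_mem        = 8192; // total host memory in MB (example)

        // The scheduler keeps the complement of the reserved fraction:
        // reserving 0.1 leaves 90% of MAX_MEM schedulable.
        const int usable = static_cast<int>((1.0 - hypervisor_mem)
                                            * static_cast<float>(max_mem));

        std::cout << "Schedulable memory: " << usable << " MB\n"; // 7372 MB
        return 0;
    }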
@@ -45,14 +40,13 @@ ONED_PORT = 2633

 SCHED_INTERVAL = 30

 MAX_VM = 300
 MAX_DISPATCH = 30
 MAX_HOST = 1

 LIVE_RESCHEDS = 0

-CPU_FREE_THRESHOLD = 0.9
 HYPERVISOR_MEM = 0.1

 DEFAULT_SCHED = [
     policy = 1
@@ -27,7 +27,10 @@ class HostPoolXML : public PoolXML
 {
 public:

-    HostPoolXML(Client* client):PoolXML(client){};
+    HostPoolXML(Client* client, float mem):PoolXML(client)
+    {
+        HostXML::set_hypervisor_mem(mem);
+    };

     int set_up();
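The new constructor pushes the configured fraction into HostXML's static state before any host object exists; a hedged usage sketch (client is an already-constructed XML-RPC Client*, and 0.1 stands in for the HYPERVISOR_MEM value):

    // Sketch only: mirrors how the scheduler is expected to wire the pool.
    HostPoolXML * hpool = new HostPoolXML(client, 0.1);

    // set_hypervisor_mem() has already run, so every HostXML built by
    // set_up() scales its max_mem by the reserved fraction.
    hpool->set_up();

Storing the fraction statically keeps the per-host objects unaware of configuration; the trade-off is that it must be set before the pool is used, which the constructor guarantees.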
@@ -40,16 +40,6 @@ public:
         return oid;
     };

-    /**
-     *  Gets the current host capacity
-     *    @param cpu the host free cpu, scaled according to a given threshold
-     *    @param memory the host free memory
-     *    @param cpu_threshold to consider the host totally free
-     *    @param mem_threshold to consider the host totally free
-     */
-    void get_capacity(int& cpu, int& memory,
-                      float cpu_threshold, float mem_threshold) const;
-
     /**
      *  Tests whether a new VM can be hosted by the host or not
      *    @param cpu needed by the VM (percentage)
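With get_capacity() removed, capacity filtering rests entirely on test_capacity(), whose body is not part of this diff. A plausible shape, with every member name here an assumption rather than quoted code:

    // Hypothetical sketch -- not the implementation from this commit.
    // A host accepts a VM iff its remaining share covers the request.
    bool test_capacity(int cpu, int mem, int disk) const
    {
        return (cpu  <= max_cpu  - cpu_usage) &&  // cpu_usage: assumed member
               (mem  <= max_mem  - mem_usage) &&  // mem_usage: assumed member
               (disk <= free_disk);
    }

Because init_attributes() now scales max_mem by the hypervisor reservation (see the HostXML.cc hunk below), a single check like this subsumes the old mem_threshold subtraction.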
@@ -81,6 +71,14 @@ public:
         running_vms++;
     };

+    /**
+     *  Sets the memory fraction reserved for the hypervisor. This function
+     *  should be called before using the host pool.
+     */
+    static void set_hypervisor_mem(float mem)
+    {
+        hypervisor_mem = 1.0 - mem;
+    };
+
 private:

     int oid;
@@ -94,12 +92,10 @@ private:
     int max_mem;     /**< Total memory capacity (in Mb) */
     int max_cpu;     /**< Total cpu capacity (in percentage) */

     int free_disk;   /**< Free disk from the IM monitor */
     int free_mem;    /**< Free memory from the IM monitor */
     int free_cpu;    /**< Free cpu from the IM monitor */

     int running_vms; /**< Number of running VMs in this Host */

+    static float hypervisor_mem; /**< Fraction of memory for the VMs */
+
     void init_attributes();
 };
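Note the inversion in set_hypervisor_mem(): it stores the complement of the configured value, so the static member holds the fraction left for the VMs, exactly as its doc comment says:

    // With HYPERVISOR_MEM = 0.1 in sched.conf:
    HostXML::set_hypervisor_mem(0.1);
    // hypervisor_mem == 0.9, i.e. 90% of MAX_MEM stays schedulable,
    // and init_attributes() later computes max_mem = 0.9 * MAX_MEM.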
@@ -54,8 +54,7 @@ protected:
         machines_limit(0),
         dispatch_limit(0),
         host_dispatch_limit(0),
-        cpu_threshold(0),
-        mem_threshold(0),
+        hypervisor_mem(0),
         client(0)
     {
         am.addListener(this);
@@ -157,14 +156,9 @@ private:
     unsigned int host_dispatch_limit;

     /**
-     *  Threshold value to round up freecpu
+     *  Memory reserved for the hypervisor
      */
-    float cpu_threshold;
-
-    /**
-     *  Threshold value to round up freemem
-     */
-    float mem_threshold;
+    float hypervisor_mem;

     /**
      *  XML_RPC client
@@ -18,27 +18,7 @@

 #include "HostXML.h"

-void HostXML::get_capacity(int& cpu, int& memory,
-                           float cpu_threshold, float mem_threshold) const
-{
-    vector<string> result;
-
-    cpu = free_cpu;
-
-    /* eg. 96.7 >= 0.9 * 100, Round so 96.7 free is 100 (and CPU = 1, fits)*/
-    if ( cpu >= static_cast<int>(cpu_threshold * static_cast<float>(max_cpu)) )
-    {
-        cpu = static_cast<int>(ceil(static_cast<float>(cpu)/100.0) * 100);
-    }
-
-    memory = free_mem - static_cast<int>(mem_threshold * static_cast<float>(max_mem));
-
-    /* sanity check in case the free_mem goes below the threshold */
-    if ( memory < 0 )
-    {
-        memory = 0;
-    }
-}
+float HostXML::hypervisor_mem;

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
@@ -55,11 +35,10 @@ void HostXML::init_attributes()
     max_mem  = atoi(((*this)["/HOST/HOST_SHARE/MAX_MEM"])[0].c_str());
     max_cpu  = atoi(((*this)["/HOST/HOST_SHARE/MAX_CPU"])[0].c_str());

     free_disk = atoi(((*this)["/HOST/HOST_SHARE/FREE_DISK"])[0].c_str());
     free_mem  = atoi(((*this)["/HOST/HOST_SHARE/FREE_MEM"])[0].c_str());
     free_cpu  = atoi(((*this)["/HOST/HOST_SHARE/FREE_CPU"])[0].c_str());

     running_vms = atoi(((*this)["/HOST/HOST_SHARE/RUNNING_VMS"])[0].c_str());

+    //Reserve memory for the hypervisor
+    max_mem = static_cast<int>(hypervisor_mem * static_cast<float>(max_mem));
 }

 /* -------------------------------------------------------------------------- */
@@ -139,11 +139,9 @@ void Scheduler::start()

     conf.get("MAX_HOST", host_dispatch_limit);

-    conf.get("CPU_FREE_THRESHOLD", cpu_threshold);
-
-    conf.get("HYPERVISOR_MEM", mem_threshold);
-
     conf.get("LIVE_RESCHEDS", live_rescheds);

+    conf.get("HYPERVISOR_MEM", hypervisor_mem);
+
     oss.str("");
@@ -176,7 +174,7 @@ void Scheduler::start()
     // Pools
     // -----------------------------------------------------------

-    hpool  = new HostPoolXML(client);
+    hpool  = new HostPoolXML(client, hypervisor_mem);
     vmpool = new VirtualMachinePoolXML(client,
                                        machines_limit,
                                        (live_rescheds == 1));
@@ -429,28 +427,18 @@ void Scheduler::match()

         vm->get_requirements(vm_cpu,vm_memory,vm_disk);

-        host->get_capacity(host_cpu, host_memory, cpu_threshold, mem_threshold);
-
-        if ((vm_memory <= host_memory) && (vm_cpu <= host_cpu))
+        if (host->test_capacity(vm_cpu,vm_memory,vm_disk) == true)
         {
-            if (host->test_capacity(vm_cpu,vm_memory,vm_disk) == true)
-            {
-                vm->add_host(host->get_hid());
-            }
+            vm->add_host(host->get_hid());
         }
         else
         {
             ostringstream oss;

             oss << "Host " << host->get_hid() << " filtered out. "
-                << "Not enough capacity: " << endl
-                << "\t free cpu: " << host_cpu
-                << " vm cpu: " << vm_cpu << endl
-                << "\t free mem: " << host_memory
-                << " vm mem: " << vm_memory << endl;
+                << "Not enough capacity. " << endl;

             NebulaLog::log("SCHED",Log::DEBUG,oss);
         }
     }
 }
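The filter thus collapses from a two-step check (threshold-scaled free capacity, then test_capacity()) into a single call per host. A condensed paraphrase of the resulting control flow, with the loop scaffolding and container names assumed:

    // Condensed sketch of the new filtering step (not the full method).
    for (unsigned int j = 0; j < hosts.size(); j++)  // hosts: matched HostXML*
    {
        HostXML * host = hosts[j];

        if (host->test_capacity(vm_cpu, vm_memory, vm_disk))
        {
            vm->add_host(host->get_hid());  // host can run the VM
        }
        else
        {
            // Logged at DEBUG: "Host <id> filtered out. Not enough capacity."
        }
    }

One behavioral consequence visible in the hunk: the DEBUG message no longer reports the free and requested cpu/memory figures, since the scaled capacity values are no longer computed in this scope.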
@@ -41,7 +41,6 @@ void SchedulerTemplate::set_conf_default()
 #  MAX_HOST
 #  DEFAULT_SCHED
 #  LIVE_RESCHEDS
-#  CPU_FREE_THRESHOLD
 #  HYPERVISOR_MEM
 #-------------------------------------------------------------------------------
 */
@@ -88,12 +87,6 @@ void SchedulerTemplate::set_conf_default()
     vattribute = new VectorAttribute("DEFAULT_SCHED",vvalue);
     conf_default.insert(make_pair(attribute->name(),vattribute));

-    //CPU_FREE_THRESHOLD
-    value = "0.9";
-
-    attribute = new SingleAttribute("CPU_FREE_THRESHOLD",value);
-    conf_default.insert(make_pair(attribute->name(),attribute));
-
     //HYPERVISOR_MEM
     value = "0.1";
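For completeness, a sketch of how the default registered above is consumed; it assumes conf.get() falls back to conf_default when sched.conf omits the attribute, which is the pattern these defaults exist for:

    // Assumption: absent from sched.conf, HYPERVISOR_MEM resolves to the
    // registered default, so hypervisor_mem ends up as 0.1.
    float hypervisor_mem = 0;

    conf.get("HYPERVISOR_MEM", hypervisor_mem);  // 0.1 unless overridden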