1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-03-26 06:50:09 +03:00

Merge branch 'feature-846'

This commit is contained in:
Ruben S. Montero 2013-02-01 02:25:32 +01:00
commit 63e39f51c2
9 changed files with 340 additions and 77 deletions

View File

@ -196,6 +196,15 @@ public:
*/
static int validate_xml(const string &xml_doc);
/**
* Renames the nodes given in the xpath expression
* @param xpath_expr xpath expression to find the nodes to rename
* @param new_name new name for the xml elements
*
* @return the number of nodes renamed
*/
int rename_nodes(const char * xpath_expr, const char * new_name);
// ---------------------------------------------------------
// Lex & bison parser for requirements and rank expressions
// ---------------------------------------------------------

View File

@ -124,6 +124,13 @@ protected:
virtual int schedule();
/**
* Retrieves the pools
*
* @return 0 on success
* -1 on error
* -2 if no VMs need to be scheduled
*/
virtual int set_up_pools();
private:

View File

@ -34,6 +34,13 @@ public:
~VirtualMachinePoolXML(){};
/**
* Retrieves the pending and rescheduling VMs
*
* @return 0 on success
* -1 on error
* -2 if no VMs need to be scheduled
*/
int set_up();
/**
@ -51,10 +58,28 @@ public:
* Dispatch a VM to the given host
* @param vid the VM id
* @param hid the id of the target host
* @param resched the machine is going to be rescheduled
* @param resched the machine is going to be rescheduled
*/
int dispatch(int vid, int hid, bool resched) const;
/**
* Update the VM template
* @param vid the VM id
* @param st the template string
*/
int update(int vid, const string &st) const;
/**
 *  Update the VM template back in oned
 *    @param vm the VM whose user template is written back
 *
 *    @return 0 on success, -1 otherwise
 */
int update(VirtualMachineXML * vm) const
{
    string tmpl_xml;

    return update(vm->get_oid(), vm->get_template(tmpl_xml));
};
protected:
int get_suitable_nodes(vector<xmlNodePtr>& content)

View File

@ -22,6 +22,7 @@
#include "ObjectXML.h"
#include "HostPoolXML.h"
#include "VirtualMachineTemplate.h"
using namespace std;
@ -29,12 +30,14 @@ class VirtualMachineXML : public ObjectXML
{
public:
VirtualMachineXML(const string &xml_doc):ObjectXML(xml_doc)
VirtualMachineXML(const string &xml_doc):
ObjectXML(xml_doc)
{
init_attributes();
};
VirtualMachineXML(const xmlNodePtr node):ObjectXML(node)
VirtualMachineXML(const xmlNodePtr node):
ObjectXML(node)
{
init_attributes();
}
@ -103,22 +106,56 @@ public:
return requirements;
};
/**
 *  Get the user template of the VM
 *    @param xml_str string to hold the rendered template
 *
 *    @return the template as an XML string, or "" if the VM has none
 */
string& get_template(string& xml_str)
{
    if (vm_template == 0)
    {
        xml_str.clear(); // VM was loaded without a USER_TEMPLATE
    }
    else
    {
        vm_template->to_xml(xml_str);
    }

    return xml_str;
}
/**
 *  Function to write a Virtual Machine in an output stream. Prints the
 *  matched hosts (priority and host id) in descending priority order;
 *  writes nothing if the VM has no matched hosts.
 */
friend ostream& operator<<(ostream& os, VirtualMachineXML& vm)
{
    if (vm.hosts.empty())
    {
        return os;
    }

    vector<VirtualMachineXML::Host *>::reverse_iterator i;

    os << "\t PRI\tHID VM: " << vm.oid << endl
       << "\t-----------------------" << endl;

    // reverse iteration: hosts is kept sorted ascending, print best first
    for (i = vm.hosts.rbegin(); i != vm.hosts.rend(); ++i)
    {
        os << "\t" << (*i)->priority << "\t" << (*i)->hid << endl;
    }

    os << endl;

    return os;
};
/**
* Adds a message to the VM's USER_TEMPLATE/SCHED_MESSAGE attribute
* @param st Message to set
*/
void log(const string &st);
protected:
/**
@ -175,6 +212,10 @@ protected:
*/
vector<VirtualMachineXML::Host *> hosts;
/**
* The VM user template
*/
VirtualMachineTemplate * vm_template;
};
#endif /* VM_XML_H_ */

View File

@ -26,6 +26,11 @@ int VirtualMachinePoolXML::set_up()
if ( rc == 0 )
{
if (objects.empty())
{
return -2;
}
oss.str("");
oss << "Pending and rescheduling VMs:" << endl;
@ -91,7 +96,6 @@ int VirtualMachinePoolXML::load_info(xmlrpc_c::value &result)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int VirtualMachinePoolXML::dispatch(int vid, int hid, bool resched) const
{
ostringstream oss;
@ -103,7 +107,7 @@ int VirtualMachinePoolXML::dispatch(int vid, int hid, bool resched) const
}
else
{
oss << "Dispatching ";
oss << "Dispatching ";
}
oss << "virtual machine " << vid << " to host " << hid;
@ -168,3 +172,40 @@ int VirtualMachinePoolXML::dispatch(int vid, int hid, bool resched) const
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Updates the template of a VM through the one.vm.update XML-RPC call.
 *    @param vid the VM id
 *    @param st  the template string
 *
 *    @return 0 on success, -1 on RPC failure or error reply
 */
int VirtualMachinePoolXML::update(int vid, const string &st) const
{
    xmlrpc_c::value result;

    try
    {
        client->call( client->get_endpoint(),           // serverUrl
                      "one.vm.update",                  // methodName
                      "sis",                            // arguments format
                      &result,                          // resultP
                      client->get_oneauth().c_str(),    // session token
                      vid,                              // VM ID
                      st.c_str()                        // Template
                    );
    }
    catch (exception const& e)
    {
        return -1;
    }

    vector<xmlrpc_c::value> values =
            xmlrpc_c::value_array(result).vectorValueValue();

    // Guard against a malformed (empty) reply before indexing values[0];
    // oned replies [success, ...] with a boolean first element
    if ( values.empty() || !xmlrpc_c::value_boolean(values[0]) )
    {
        return -1;
    }

    return 0;
}

View File

@ -20,13 +20,15 @@
void VirtualMachineXML::init_attributes()
{
vector<string> result;
vector<string> result;
vector<xmlNodePtr> nodes;
oid = atoi(((*this)["/VM/ID"] )[0].c_str());
uid = atoi(((*this)["/VM/UID"])[0].c_str());
gid = atoi(((*this)["/VM/GID"])[0].c_str());
result = ((*this)["/VM/TEMPLATE/MEMORY"]);
if (result.size() > 0)
{
memory = atoi(result[0].c_str());
@ -37,6 +39,7 @@ void VirtualMachineXML::init_attributes()
}
result = ((*this)["/VM/TEMPLATE/CPU"]);
if (result.size() > 0)
{
istringstream iss;
@ -49,6 +52,7 @@ void VirtualMachineXML::init_attributes()
}
result = ((*this)["/VM/TEMPLATE/RANK"]);
if (result.size() > 0)
{
rank = result[0];
@ -59,6 +63,7 @@ void VirtualMachineXML::init_attributes()
}
result = ((*this)["/VM/TEMPLATE/REQUIREMENTS"]);
if (result.size() > 0)
{
requirements = result[0];
@ -66,11 +71,11 @@ void VirtualMachineXML::init_attributes()
else
{
requirements = "";
}
}
result = ((*this)["/VM/HISTORY_RECORDS/HISTORY/HID"]);
if (result.size() > 0)
if (result.size() > 0)
{
hid = atoi(result[0].c_str());
}
@ -88,7 +93,20 @@ void VirtualMachineXML::init_attributes()
else
{
resched = 0;
}
}
if (get_nodes("/VM/USER_TEMPLATE", nodes) > 0)
{
vm_template = new VirtualMachineTemplate;
vm_template->from_xml_node(nodes[0]);
free_nodes(nodes);
}
else
{
vm_template = 0;
}
}
/* -------------------------------------------------------------------------- */
@ -104,6 +122,11 @@ VirtualMachineXML::~VirtualMachineXML()
}
hosts.clear();
if (vm_template != 0)
{
delete vm_template;
}
}
/* -------------------------------------------------------------------------- */
@ -117,7 +140,7 @@ void VirtualMachineXML::add_host(int host_id)
ss = new VirtualMachineXML::Host(host_id);
hosts.push_back(ss);
hosts.push_back(ss);
}
}
@ -222,3 +245,31 @@ void VirtualMachineXML::get_requirements (int& cpu, int& memory, int& disk)
memory = this->memory * 1024; //now in Kilobytes
disk = 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineXML::log(const string &st)
{
if (vm_template == 0 || st.empty())
{
return;
}
char str[26];
time_t the_time = time(NULL);
ostringstream oss;
#ifdef SOLARIS
ctime_r(&(the_time),str,sizeof(char)*26);
#else
ctime_r(&(the_time),str);
#endif
str[24] = '\0'; // Get rid of final enter character
oss << str << " : " << st;
vm_template->replace("SCHED_MESSAGE", oss.str());
}

View File

@ -128,7 +128,7 @@ void Scheduler::start()
conf.get("ONED_PORT", oned_port);
oss.str("");
oss << "http://localhost:" << oned_port << "/RPC2";
oss << "http://localhost:" << oned_port << "/RPC2";
url = oss.str();
conf.get("SCHED_INTERVAL", timer);
@ -142,9 +142,9 @@ void Scheduler::start()
conf.get("LIVE_RESCHEDS", live_rescheds);
conf.get("HYPERVISOR_MEM", hypervisor_mem);
oss.str("");
oss << "Starting Scheduler Daemon" << endl;
oss << "----------------------------------------\n";
oss << " Scheduler Configuration File \n";
@ -176,7 +176,7 @@ void Scheduler::start()
hpool = new HostPoolXML(client, hypervisor_mem);
clpool = new ClusterPoolXML(client);
vmpool = new VirtualMachinePoolXML(client,
vmpool = new VirtualMachinePoolXML(client,
machines_limit,
(live_rescheds == 1));
acls = new AclXML(client);
@ -265,6 +265,17 @@ int Scheduler::set_up_pools()
map<int,int>::const_iterator it;
map<int, int> shares;
//--------------------------------------------------------------------------
//Cleans the cache and get the pending VMs
//--------------------------------------------------------------------------
rc = vmpool->set_up();
if ( rc != 0 )
{
return rc;
}
//--------------------------------------------------------------------------
//Cleans the cache and get the hosts ids
//--------------------------------------------------------------------------
@ -293,18 +304,6 @@ int Scheduler::set_up_pools()
hpool->merge_clusters(clpool);
//--------------------------------------------------------------------------
//Cleans the cache and get the pending VMs
//--------------------------------------------------------------------------
rc = vmpool->set_up();
if ( rc != 0 )
{
return rc;
}
//--------------------------------------------------------------------------
//Cleans the cache and get the ACLs
//--------------------------------------------------------------------------
@ -336,8 +335,13 @@ void Scheduler::match()
int vm_cpu;
int vm_disk;
int oid;
int uid;
int gid;
int n_hosts;
int n_matched;
int n_auth;
int n_error;
string reqs;
@ -359,50 +363,19 @@ void Scheduler::match()
reqs = vm->get_requirements();
oid = vm->get_oid();
uid = vm->get_uid();
gid = vm->get_gid();
n_hosts = 0;
n_matched = 0;
n_auth = 0;
n_error = 0;
for (h_it=hosts.begin(), matched=false; h_it != hosts.end(); h_it++)
{
host = static_cast<HostXML *>(h_it->second);
// -----------------------------------------------------------------
// Evaluate VM requirements
// -----------------------------------------------------------------
if (reqs != "")
{
rc = host->eval_bool(reqs,matched,&error);
if ( rc != 0 )
{
ostringstream oss;
matched = false;
oss << "Error evaluating expresion: " << reqs
<< ", error: " << error;
NebulaLog::log("SCHED",Log::ERROR,oss);
free(error);
}
}
else
{
matched = true;
}
if ( matched == false )
{
ostringstream oss;
oss << "Host " << host->get_hid() <<
" filtered out. It does not fullfil REQUIREMENTS.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
// -----------------------------------------------------------------
// Check if user is authorized
// -----------------------------------------------------------------
@ -420,7 +393,7 @@ void Scheduler::match()
host_perms.oid = host->get_hid();
host_perms.obj_type = PoolObjectSQL::HOST;
matched = acls->authorize(uid,
matched = acls->authorize(uid,
gid,
host_perms,
AuthRequest::MANAGE);
@ -430,7 +403,7 @@ void Scheduler::match()
{
ostringstream oss;
oss << "Host " << host->get_hid()
oss << "VM " << oid << ": Host " << host->get_hid()
<< " filtered out. User is not authorized to "
<< AuthRequest::operation_to_str(AuthRequest::MANAGE)
<< " it.";
@ -438,6 +411,56 @@ void Scheduler::match()
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
n_auth++;
// -----------------------------------------------------------------
// Evaluate VM requirements
// -----------------------------------------------------------------
if (reqs != "")
{
rc = host->eval_bool(reqs,matched,&error);
if ( rc != 0 )
{
ostringstream oss;
ostringstream error_msg;
matched = false;
n_error++;
error_msg << "Error evaluating REQUIREMENTS expression: '"
<< reqs << "', error: " << error;
oss << "VM " << oid << ": " << error_msg.str();
NebulaLog::log("SCHED",Log::ERROR,oss);
vm->log(error_msg.str());
free(error);
break;
}
}
else
{
matched = true;
}
if ( matched == false )
{
ostringstream oss;
oss << "VM " << oid << ": Host " << host->get_hid() <<
" filtered out. It does not fulfill REQUIREMENTS.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
n_matched++;
// -----------------------------------------------------------------
// Check host capacity
// -----------------------------------------------------------------
@ -446,18 +469,49 @@ void Scheduler::match()
if (host->test_capacity(vm_cpu,vm_memory,vm_disk) == true)
{
vm->add_host(host->get_hid());
vm->add_host(host->get_hid());
n_hosts++;
}
else
{
ostringstream oss;
oss << "Host " << host->get_hid() << " filtered out. "
<< "Not enough capacity. " << endl;
oss << "VM " << oid << ": Host " << host->get_hid()
<< " filtered out. Not enough capacity.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
}
}
// ---------------------------------------------------------------------
// Log scheduling errors to VM user if any
// ---------------------------------------------------------------------
if (n_hosts == 0) //No hosts assigned, let's see why
{
if (n_error == 0) //No syntax error
{
if (hosts.size() == 0)
{
vm->log("No hosts enabled to run VMs");
}
else if (n_auth == 0)
{
vm->log("User is not authorized to use any host");
}
else if (n_matched == 0)
{
vm->log("No host meets the REQUIREMENTS expression");
}
else
{
vm->log("No host with enough capacity to deploy the VM");
}
}
vmpool->update(vm);
}
}
}
@ -539,9 +593,7 @@ void Scheduler::dispatch()
{
vm = static_cast<VirtualMachineXML*>(vm_it->second);
oss << "\t PRI\tHID VM: " << vm->get_oid() << endl
<< "\t-----------------------" << endl
<< *vm << endl;
oss << *vm;
}
NebulaLog::log("SCHED",Log::INFO,oss);

View File

@ -73,19 +73,19 @@ void SchedulerTemplate::set_conf_default()
attribute = new SingleAttribute("MAX_HOST",value);
conf_default.insert(make_pair(attribute->name(),attribute));
//LIVE_RESCHEDS
value = "0";
attribute = new SingleAttribute("LIVE_RESCHEDS",value);
conf_default.insert(make_pair(attribute->name(),attribute));
//DEFAULT_SCHED
//DEFAULT_SCHED
map<string,string> vvalue;
vvalue.insert(make_pair("POLICY","1"));
vattribute = new VectorAttribute("DEFAULT_SCHED",vvalue);
conf_default.insert(make_pair(attribute->name(),vattribute));
conf_default.insert(make_pair(vattribute->name(),vattribute));
//HYPERVISOR_MEM
value = "0.1";
@ -105,7 +105,7 @@ string SchedulerTemplate::get_policy() const
istringstream iss;
vector<const Attribute *> vsched;
const VectorAttribute * sched;
const VectorAttribute * sched;
get("DEFAULT_SCHED", vsched);
@ -123,7 +123,7 @@ string SchedulerTemplate::get_policy() const
case 1: //Striping
rank = "- RUNNING_VMS";
break;
case 2: //Load-aware
rank = "FREE_CPU";
break;
@ -137,4 +137,4 @@ string SchedulerTemplate::get_policy() const
}
return rank;
}
}

View File

@ -541,6 +541,43 @@ void ObjectXML::xml_parse(const string &xml_doc)
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Renames the nodes given in the xpath expression.
 *    @param xpath_expr xpath expression to find the nodes to rename
 *    @param new_name   new name for the xml elements
 *
 *    @return the number of nodes renamed
 */
int ObjectXML::rename_nodes(const char * xpath_expr, const char * new_name)
{
    xmlXPathObjectPtr obj;

    obj = xmlXPathEvalExpression(
            reinterpret_cast<const xmlChar *>(xpath_expr), ctx);

    if (obj == 0)
    {
        return 0;
    }

    if (obj->nodesetval == 0)
    {
        // fix: the xpath object was leaked on this path; it must be freed
        // even when the expression matched no node set
        xmlXPathFreeObject(obj);
        return 0;
    }

    xmlNodeSetPtr ns = obj->nodesetval;
    int           size    = ns->nodeNr;
    int           renamed = 0;
    xmlNodePtr    cur;

    for(int i = 0; i < size; ++i)
    {
        cur = ns->nodeTab[i];

        // only element nodes can be meaningfully renamed
        if ( cur == 0 || cur->type != XML_ELEMENT_NODE )
        {
            continue;
        }

        xmlNodeSetName(cur, reinterpret_cast<const xmlChar *>(new_name));

        renamed++;
    }

    xmlXPathFreeObject(obj);

    return renamed;
}
/* ************************************************************************ */
/* Host :: Parse functions to compute rank and evaluate requirements */