mirror of https://github.com/OpenNebula/one.git synced 2025-03-25 02:50:08 +03:00

Feature #4369: New automatic_cluster_requirements to filter out clusters

Carlos Martín 2016-03-11 18:35:00 +01:00
parent 5c54e1c5f2
commit 4697f1eec0
11 changed files with 314 additions and 91 deletions
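A minimal sketch of the feature's intent, using illustrative values (the IDs, names and expressions below are assumptions, not taken from the commit): the VM owner sets SCHED_CLUSTER_REQUIREMENTS in the user template, the core derives AUTOMATIC_CLUSTER_REQUIREMENTS from the clusters compatible with the VM, and the scheduler evaluates the combined boolean expression against each cluster before matching hosts and system datastores.

// Illustrative expressions only (IDs and names are hypothetical):
//   User template:       SCHED_CLUSTER_REQUIREMENTS = "NAME != \"staging\""
//   Derived by the core: AUTOMATIC_CLUSTER_REQUIREMENTS = "(ID = 100 | ID = 101)"
//   Combined expression evaluated against every cluster:
//     (ID = 100 | ID = 101) & ( NAME != "staging" )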

View File

@@ -43,10 +43,13 @@ public:
private:
int oid;
void init_attributes()
{
xpath(oid, "/CLUSTER/ID", -1);
};
void init_attributes();
// Configuration attributes
static const char *cluster_paths[]; /**< paths for search function */
static int cluster_num_paths; /**< number of paths*/
};
#endif /* CLUSTER_XML_H_ */

View File

@@ -45,14 +45,6 @@ public:
return static_cast<HostXML *>(PoolXML::get(oid));
};
/**
* For each Host in a cluster, adds the cluster template as a new
* Host xml element
*
* @param clpool Cluster pool
*/
void merge_clusters(ClusterPoolXML * clpool);
protected:
int get_suitable_nodes(vector<xmlNodePtr>& content)

View File

@@ -47,6 +47,21 @@ public:
return cluster_ids;
};
bool is_in_cluster(const set<int> &cids) const
{
set<int>::const_iterator i;
for (i = cids.begin(); i != cids.end(); i++)
{
if (cluster_ids.find(*i) != cluster_ids.end())
{
return true;
}
}
return false;
};
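// Illustrative example (hypothetical IDs): an object whose cluster_ids set
// is {100} passes a filter set of {100, 101}; with no common ID, or with an
// empty filter set, the intersection test above returns false.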
/**
* Tests whether a new VM can be hosted by the host or not
* @param cpu needed by the VM (percentage)

View File

@@ -116,6 +116,11 @@ public:
return ds_requirements;
}
const string& get_cluster_requirements()
{
return cluster_requirements;
};
void get_requirements (int& cpu, int& memory, long long& disk,
vector<VectorAttribute *> &pci);
@@ -155,6 +160,15 @@ public:
match_datastores.add_resource(oid);
}
/**
* Adds a matching cluster
* @param oid of the cluster
*/
void add_match_cluster(int oid)
{
match_clusters.insert(oid);
}
/**
* Returns a vector of matched hosts
*/
@@ -171,6 +185,14 @@ public:
return match_datastores.get_resources();
}
/**
* Returns the set of matched clusters
*/
const set<int>& get_match_clusters()
{
return match_clusters;
}
/**
* Sort the matched hosts for the VM
*/
@@ -329,6 +351,8 @@ protected:
ResourceMatch match_datastores;
set<int> match_clusters;
bool only_public_cloud;
/* ----------------------- VIRTUAL MACHINE ATTRIBUTES ------------------- */
@@ -357,6 +381,8 @@ protected:
string ds_requirements;
string ds_rank;
string cluster_requirements;
VirtualMachineTemplate * vm_template; /**< The VM template */
VirtualMachineTemplate * user_template; /**< The VM user template */
};
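A minimal sketch of how these new members fit together, mirroring the Scheduler::match_schedule() changes later in this commit (surrounding loops and variables are omitted or illustrative):

// Cluster loop: record clusters that satisfy the VM's cluster requirements.
vm->add_match_cluster(cluster->get_oid());

// Host and system DS loops: discard candidates outside the matched clusters,
// before capacity or rank are evaluated.
if (!host->is_in_cluster(vm->get_match_clusters()))
{
    // rejected
}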

View File

@@ -0,0 +1,37 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2015, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "ClusterXML.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ClusterXML::cluster_num_paths = 2;
const char *ClusterXML::cluster_paths[] = {
"/CLUSTER/TEMPLATE/",
"/CLUSTER/"};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void ClusterXML::init_attributes()
{
xpath(oid, "/CLUSTER/ID", -1);
//-------------------- Init search xpath routes ---------------------------
ObjectXML::paths = cluster_paths;
ObjectXML::num_paths = cluster_num_paths;
}
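A minimal usage sketch, assuming a ClusterXML* named cluster and the ObjectXML::eval_bool() call that the scheduler makes on clusters later in this commit; the expression and the DATACENTER attribute are hypothetical. With the two search paths registered above, bare attribute names in a cluster requirement can refer to cluster template attributes (/CLUSTER/TEMPLATE/...) as well as core attributes such as NAME (/CLUSTER/NAME).

bool matched = false;
char *estr = 0;

// NAME resolves through /CLUSTER/, DATACENTER through /CLUSTER/TEMPLATE/
if (cluster->eval_bool("NAME != \"default\" & DATACENTER = \"eu-west\"", matched, &estr) != 0)
{
    // malformed expression: estr holds the parser error and must be freed
    free(estr);
}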

View File

@@ -95,46 +95,3 @@ int HostPoolXML::load_info(xmlrpc_c::value &result)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void HostPoolXML::merge_clusters(ClusterPoolXML * clpool)
{
// TODO
/*
map<int,ObjectXML*>::iterator it;
ClusterXML* cluster;
HostXML* host;
int cluster_id;
vector<xmlNodePtr> nodes;
for (it=objects.begin(); it!=objects.end(); it++)
{
host = static_cast<HostXML*>(it->second);
cluster_id = host->get_cid();
if(cluster_id != -1) //ClusterPool::NONE_CLUSTER_ID
{
cluster = clpool->get(cluster_id);
if(cluster != 0)
{
nodes.clear();
cluster->get_nodes("/CLUSTER/TEMPLATE", nodes);
if (!nodes.empty())
{
host->add_node("/HOST", nodes[0], "CLUSTER_TEMPLATE");
}
cluster->free_nodes(nodes);
}
}
}
*/
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@@ -26,13 +26,12 @@
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HostXML::host_num_paths = 4;
int HostXML::host_num_paths = 3;
const char *HostXML::host_paths[] = {
"/HOST/TEMPLATE/",
"/HOST/HOST_SHARE/",
"/HOST/",
"/HOST/CLUSTER_TEMPLATE/"};
"/HOST/"};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@@ -27,6 +27,7 @@ source_files=[
'VirtualMachinePoolXML.cc',
'VirtualMachineXML.cc',
'ClusterPoolXML.cc',
'ClusterXML.cc',
'UserPoolXML.cc',
'DatastorePoolXML.cc',
'DatastoreXML.cc']

View File

@@ -30,6 +30,7 @@ void VirtualMachineXML::init_attributes()
int action;
string automatic_requirements;
string automatic_cluster_requirements;
xpath(oid, "/VM/ID", -1);
xpath(uid, "/VM/UID", -1);
@@ -92,6 +93,28 @@ void VirtualMachineXML::init_attributes()
ds_requirements = automatic_requirements;
}
// ------------------- CLUSTER REQUIREMENTS --------------------------------
xpath(automatic_cluster_requirements, "/VM/TEMPLATE/AUTOMATIC_CLUSTER_REQUIREMENTS", "");
rc = xpath(cluster_requirements, "/VM/USER_TEMPLATE/SCHED_CLUSTER_REQUIREMENTS", "");
if (rc == 0)
{
if ( !automatic_cluster_requirements.empty() )
{
ostringstream oss;
oss << automatic_cluster_requirements << " & ( " << cluster_requirements << " )";
cluster_requirements = oss.str();
}
}
else if ( !automatic_cluster_requirements.empty() )
{
cluster_requirements = automatic_cluster_requirements;
}
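// Worked example (all values illustrative, not taken from the commit):
//   AUTOMATIC_CLUSTER_REQUIREMENTS   (ID = 100 | ID = 101)
//   SCHED_CLUSTER_REQUIREMENTS       NAME != "slow-cluster"
// yields
//   cluster_requirements = "(ID = 100 | ID = 101) & ( NAME != \"slow-cluster\" )"
// If only one of the two is present, cluster_requirements is that expression
// unchanged. The user part is wrapped in parentheses so an OR inside
// SCHED_CLUSTER_REQUIREMENTS cannot bypass the automatic cluster filter.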
// ---------------- HISTORY HID, DSID, RESCHED & TEMPLATE ------------------
xpath(hid, "/VM/HISTORY_RECORDS/HISTORY/HID", -1);

View File

@@ -471,12 +471,6 @@ int Scheduler::set_up_pools()
return rc;
}
//--------------------------------------------------------------------------
//Add to each host the corresponding cluster template
//--------------------------------------------------------------------------
hpool->merge_clusters(clpool);
//--------------------------------------------------------------------------
//Cleans the cache and get the ACLs
//--------------------------------------------------------------------------
@@ -513,12 +507,14 @@ int Scheduler::set_up_pools()
* @param n_error number of requirement errors, incremented if needed
* @param n_fits number of hosts with capacity that fits the VM requirements
* @param n_matched number of hosts that fulfill VM sched_requirements
* @param n_cluster_matched number of hosts that fulfill VM sched_cluster_requirements
* @param error, string describing why the host is not valid
* @return true for a positive match
*/
static bool match_host(AclXML * acls, UserPoolXML * upool, VirtualMachineXML* vm,
int vmem, int vcpu, vector<VectorAttribute *>& vpci, HostXML * host,
int &n_auth, int& n_error, int &n_fits, int &n_matched, string &error)
int &n_auth, int& n_error, int &n_fits, int &n_matched,
int &n_cluster_matched, string &error)
{
// -------------------------------------------------------------------------
// Filter current Hosts for resched VMs
@@ -570,6 +566,18 @@ static bool match_host(AclXML * acls, UserPoolXML * upool, VirtualMachineXML* vm
n_auth++;
// -------------------------------------------------------------------------
// Check host clusters
// -------------------------------------------------------------------------
if (host->is_in_cluster(vm->get_match_clusters()) != true)
{
error = "Host is not in any of the filtered Clusters.";
return false;
}
n_cluster_matched++;
// -------------------------------------------------------------------------
// Check host capacity
// -------------------------------------------------------------------------
@@ -635,12 +643,14 @@ static bool match_host(AclXML * acls, UserPoolXML * upool, VirtualMachineXML* vm
* @param n_error number of requirement errors, incremented if needed
* @param n_matched number of system ds that fulfill VM sched_requirements
* @param n_fits number of system ds with capacity that fits the VM requirements
* @param n_cluster_matched number of system ds that fulfill VM sched_cluster_requirements
* @param error, string describing why the host is not valid
* @return true for a positive match
*/
static bool match_system_ds(AclXML * acls, UserPoolXML * upool,
VirtualMachineXML* vm, long long vdisk, DatastoreXML * ds, int& n_auth,
int& n_error, int& n_fits, int &n_matched, string &error)
int& n_error, int& n_fits, int &n_matched,
int &n_cluster_matched, string &error)
{
// -------------------------------------------------------------------------
// Check if user is authorized
@@ -672,6 +682,18 @@ static bool match_system_ds(AclXML * acls, UserPoolXML * upool,
n_auth++;
// -------------------------------------------------------------------------
// Check system DS clusters
// -------------------------------------------------------------------------
if (ds->is_in_cluster(vm->get_match_clusters()) != true)
{
error = "System DS is not in any of the filtered Clusters.";
return false;
}
n_cluster_matched++;
// -------------------------------------------------------------------------
// Check datastore capacity for shared system DS (non-shared will be
// checked on a per host basis during dispatch). Resume actions do not
@@ -719,6 +741,62 @@ static bool match_system_ds(AclXML * acls, UserPoolXML * upool,
return true;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
* Match clusters for this VM that:
* 1. Meet user/automatic requirements
*
* @param vm the virtual machine
* @param cluster to evaluate vm assignment
* @param n_error number of requirement errors
* @param n_matched number of clusters that fulfill VM sched_cluster_requirements
* @param error, string describing why the cluster is not valid
* @return true for a positive match
*/
static bool match_cluster(VirtualMachineXML* vm, ClusterXML * cluster,
int& n_error, int &n_matched, string &error)
{
// -------------------------------------------------------------------------
// Evaluate VM requirements
// -------------------------------------------------------------------------
if (!vm->get_cluster_requirements().empty())
{
char * estr;
bool matched;
if ( cluster->eval_bool(vm->get_cluster_requirements(), matched, &estr) != 0 )
{
ostringstream oss;
n_error++;
oss << "Error in SCHED_CLUSTER_REQUIREMENTS: '"
<< vm->get_cluster_requirements() << "', error: " << estr;
vm->log(oss.str());
free(estr);
return false;
}
if (matched == false)
{
ostringstream oss;
oss << "It does not fulfill SCHED_CLUSTER_REQUIREMENTS: "
<< vm->get_cluster_requirements();
error = oss.str();
return false;
}
}
n_matched++;
return true;
}
/* -------------------------------------------------------------------------- */
static void log_match(int vid, const string& msg)
@@ -747,24 +825,30 @@ void Scheduler::match_schedule()
int n_auth;
int n_error;
int n_fits;
int n_cluster_matched;
ClusterXML * cluster;
HostXML * host;
DatastoreXML *ds;
string m_error;
map<int, ObjectXML*>::const_iterator vm_it;
map<int, ObjectXML*>::const_iterator h_it;
map<int, ObjectXML*>::const_iterator obj_it;
vector<SchedulerPolicy *>::iterator it;
const map<int, ObjectXML*> pending_vms = vmpool->get_objects();
const map<int, ObjectXML*> clusters = clpool->get_objects();
const map<int, ObjectXML*> hosts = hpool->get_objects();
const map<int, ObjectXML*> datastores = dspool->get_objects();
const map<int, ObjectXML*> users = upool->get_objects();
double total_match_time = 0;
double total_rank_time = 0;
double total_cl_match_time = 0;
double total_host_match_time = 0;
double total_host_rank_time = 0;
double total_ds_match_time = 0;
double total_ds_rank_time = 0;
time_t stime = time(0);
@@ -803,17 +887,62 @@ void Scheduler::match_schedule()
}
}
// ---------------------------------------------------------------------
// Match clusters for this VM.
// ---------------------------------------------------------------------
profile(true);
for (obj_it=clusters.begin(); obj_it != clusters.end(); obj_it++)
{
cluster = static_cast<ClusterXML *>(obj_it->second);
if (match_cluster(vm, cluster, n_error, n_matched, m_error))
{
vm->add_match_cluster(cluster->get_oid());
n_resources++;
}
else
{
if ( n_error > 0 )
{
log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
break;
}
else if (NebulaLog::log_level() >= Log::DDEBUG)
{
ostringstream oss;
oss << "Hosts and System DS in Cluster "
<< cluster->get_oid() << " discarded for VM "
<< vm->get_oid() << ". " << m_error;
NebulaLog::log("SCHED", Log::DDEBUG, oss);
}
}
}
total_cl_match_time += profile(false);
// ---------------------------------------------------------------------
// Log scheduling errors to VM user if any
// ---------------------------------------------------------------------
if (n_resources == 0) //No clusters assigned, let's see why
{
// TODO
}
// ---------------------------------------------------------------------
// Match hosts for this VM.
// ---------------------------------------------------------------------
profile(true);
for (h_it=hosts.begin(); h_it != hosts.end(); h_it++)
for (obj_it=hosts.begin(); obj_it != hosts.end(); obj_it++)
{
host = static_cast<HostXML *>(h_it->second);
host = static_cast<HostXML *>(obj_it->second);
if (match_host(acls, upool, vm, vm_memory, vm_cpu, vm_pci, host,
n_auth, n_error, n_fits, n_matched, m_error))
n_auth, n_error, n_fits, n_matched, n_cluster_matched, m_error))
{
vm->add_match_host(host->get_hid());
@@ -837,7 +966,7 @@ void Scheduler::match_schedule()
}
}
total_match_time += profile(false);
total_host_match_time += profile(false);
// ---------------------------------------------------------------------
// Log scheduling errors to VM user if any
@@ -855,6 +984,15 @@ void Scheduler::match_schedule()
{
vm->log("User is not authorized to use any host");
}
else if (n_cluster_matched == 0)
{
ostringstream oss;
oss << "No host meets capacity and SCHED_CLUSTER_REQUIREMENTS: "
<< vm->get_cluster_requirements();
vm->log(oss.str());
}
else if (n_fits == 0)
{
ostringstream oss;
@@ -893,7 +1031,7 @@ void Scheduler::match_schedule()
vm->sort_match_hosts();
total_rank_time += profile(false);
total_host_rank_time += profile(false);
if (vm->is_resched())//Will use same system DS for migrations
{
@@ -906,18 +1044,20 @@ void Scheduler::match_schedule()
// Match datastores for this VM
// ---------------------------------------------------------------------
profile(true);
n_resources = 0;
n_auth = 0;
n_matched = 0;
n_error = 0;
n_fits = 0;
for (h_it=datastores.begin(); h_it != datastores.end(); h_it++)
for (obj_it=datastores.begin(); obj_it != datastores.end(); obj_it++)
{
ds = static_cast<DatastoreXML *>(h_it->second);
ds = static_cast<DatastoreXML *>(obj_it->second);
if (match_system_ds(acls, upool, vm, vm_disk, ds, n_auth, n_error,
n_fits, n_matched, m_error))
n_fits, n_matched, n_cluster_matched, m_error))
{
vm->add_match_datastore(ds->get_oid());
@@ -941,6 +1081,8 @@ void Scheduler::match_schedule()
}
}
total_ds_match_time += profile(false);
// ---------------------------------------------------------------------
// Log scheduling errors to VM user if any
// ---------------------------------------------------------------------
@@ -965,6 +1107,16 @@ void Scheduler::match_schedule()
{
vm->log("User is not authorized to use any system datastore");
}
else if (n_cluster_matched == 0)
{
ostringstream oss;
oss << "No system datastore meets capacity and "
<< "SCHED_CLUSTER_REQUIREMENTS: "
<< vm->get_cluster_requirements();
vm->log(oss.str());
}
else if (n_fits == 0)
{
ostringstream oss;
@@ -999,23 +1151,33 @@ void Scheduler::match_schedule()
// Schedule matched datastores
// ---------------------------------------------------------------------
profile(true);
for (it=ds_policies.begin() ; it != ds_policies.end() ; it++)
{
(*it)->schedule(vm);
}
vm->sort_match_datastores();
total_ds_rank_time += profile(false);
}
ostringstream oss;
if (NebulaLog::log_level() >= Log::DDEBUG)
{
ostringstream oss;
oss << "Match Making statistics:\n"
<< "\tNumber of VMs: \t\t" << pending_vms.size() << endl
<< "\tTotal time: \t\t" << one_util::float_to_str(time(0) - stime) << "s" << endl
<< "\tTotal Match time: \t" << one_util::float_to_str(total_match_time) << "s" << endl
<< "\tTotal Ranking time: \t" << one_util::float_to_str(total_rank_time) << "s";
oss << "Match Making statistics:\n"
<< "\tNumber of VMs: " << pending_vms.size() << endl
<< "\tTotal time: " << one_util::float_to_str(time(0) - stime) << "s" << endl
<< "\tTotal Cluster Match time: " << one_util::float_to_str(total_cl_match_time) << "s" << endl
<< "\tTotal Host Match time: " << one_util::float_to_str(total_host_match_time) << "s" << endl
<< "\tTotal Host Ranking time: " << one_util::float_to_str(total_host_rank_time) << "s" << endl
<< "\tTotal DS Match time: " << one_util::float_to_str(total_ds_match_time) << "s" << endl
<< "\tTotal DS Ranking time: " << one_util::float_to_str(total_ds_rank_time) << "s" << endl;
NebulaLog::log("SCHED", Log::DDEBUG, oss);
NebulaLog::log("SCHED", Log::DDEBUG, oss);
}
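// Example of the resulting DDEBUG entry (timings are illustrative):
//   Match Making statistics:
//       Number of VMs:            3
//       Total time:               0.05s
//       Total Cluster Match time: 0.01s
//       Total Host Match time:    0.02s
//       Total Host Ranking time:  0.01s
//       Total DS Match time:      0.01s
//       Total DS Ranking time:    0.00s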
if (NebulaLog::log_level() >= Log::DDDEBUG)
{

View File

@@ -1462,15 +1462,23 @@ int VirtualMachine::automatic_requirements(string& error_str)
if ( !cluster_ids.empty() )
{
// TODO: create a separate cluster automatic requirements
oss << "CLUSTER_IDS = " << one_util::join(cluster_ids, ',')
<< " & !(PUBLIC_CLOUD = YES)";
}
else
{
oss << "!(PUBLIC_CLOUD = YES)";
set<int>::iterator i = cluster_ids.begin();
oss << "(ID = " << *i;
for (++i; i != cluster_ids.end(); i++)
{
oss << " | ID = " << *i;
}
oss << ")";
obj_template->add("AUTOMATIC_CLUSTER_REQUIREMENTS", oss.str());
}
oss.str("");
oss << "!(PUBLIC_CLOUD = YES)";
if (num_public != 0)
{
set<string>::iterator it = clouds.begin();