1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-03-22 18:50:08 +03:00

feature #3455: slightly restructure logic. Removed detailed output by default

(cherry picked from commit 5f8ec1743cad5fc3cbf4c8a01a7908b439fc5df6)
(cherry picked from commit d425a65e58c4c4dcfdd986e1f841f3a85eccaeee)
This commit is contained in:
Ruben S. Montero 2015-02-26 16:29:42 +01:00
parent 1d02bed6ba
commit 7f2fdee292
3 changed files with 260 additions and 295 deletions

View File

@ -25,7 +25,8 @@ int HostPoolXML::set_up()
if ( rc == 0 )
{
oss.str("");
#ifdef SCHEDDEBUG
oss << "Discovered Hosts (enabled):" << endl;
map<int,ObjectXML*>::iterator it;
@ -34,6 +35,9 @@ int HostPoolXML::set_up()
{
oss << " " << it->first;
}
#else
oss << "Discovered " << objects.size() << "enabled hosts.\n";
#endif
NebulaLog::log("HOST",Log::DEBUG,oss);
}

View File

@ -440,29 +440,8 @@ bool VirtualMachineXML::test_image_datastore_capacity(
{
ds = img_dspool->get(ds_usage_it->first);
if (ds == 0) //Should never reach here
if (ds == 0 || !ds->test_capacity(ds_usage_it->second))
{
ostringstream oss;
oss << "Image Datastore " << ds_usage_it->first << " not found.";
NebulaLog::log("SCHED",Log::INFO,oss);
return false;
}
if (!ds->test_capacity(ds_usage_it->second))
{
ostringstream oss;
oss << "VM " << oid
<< ": Image Datastore " << ds_usage_it->first
<< " does not have enough free storage.";
NebulaLog::log("SCHED",Log::INFO,oss);
log(oss.str());
return false;
}
}

View File

@ -451,6 +451,199 @@ int Scheduler::set_up_pools()
return 0;
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Match hosts for this VM that:
 *    1. Fulfills ACL
 *    2. Meets user/policy requirements
 *    3. Have enough capacity to host the VM
 *
 *    @param acls ACL pool used to authorize the VM owner on the host
 *    @param vm the virtual machine
 *    @param vmem vm memory requirement
 *    @param vcpu vm cpu requirement
 *    @param host to evaluate vm assignment
 *    @param n_auth number of hosts authorized for the user, incremented if needed
 *    @param n_error number of requirement errors, incremented if needed
 *    @param n_fits number of hosts with capacity that fits the VM requirements
 *    @param n_matched number of hosts that fulfill VM sched_requirements
 *    @param error string describing why the host is not valid
 *    @return true for a positive match
 */
static bool match_host(AclXML * acls, VirtualMachineXML* vm, int vmem, int vcpu,
    HostXML * host, int &n_auth, int& n_error, int &n_fits, int &n_matched,
    string &error)
{
    // -------------------------------------------------------------------------
    // A rescheduled VM must not be placed on its current host again
    // -------------------------------------------------------------------------
    if (vm->is_resched() && host->get_hid() == vm->get_hid())
    {
        error = "VM cannot be migrated to its current Host.";
        return false;
    }

    // -------------------------------------------------------------------------
    // VMs restricted to public clouds cannot land on local hosts
    // -------------------------------------------------------------------------
    if (vm->is_only_public_cloud() && !host->is_public_cloud())
    {
        error = "VM requires a Public Cloud Host, but it's local.";
        return false;
    }

    // -------------------------------------------------------------------------
    // ACL authorization; oneadmin (uid 0) and oneadmin group (gid 0) skip it
    // -------------------------------------------------------------------------
    if ( vm->get_uid() != 0 && vm->get_gid() != 0 )
    {
        PoolObjectAuth host_perms;

        host_perms.oid      = host->get_hid();
        host_perms.cid      = host->get_cid();
        host_perms.obj_type = PoolObjectSQL::HOST;

        // The request is made for the VM group only, even if the owner
        // belongs to more groups
        set<int> group_ids;

        group_ids.insert(vm->get_gid());

        if ( !acls->authorize(vm->get_uid(), group_ids, host_perms,
                AuthRequest::MANAGE) )
        {
            error = "Permission denied.";
            return false;
        }
    }

    n_auth++;

    // -------------------------------------------------------------------------
    // Host must have enough free CPU and memory for the VM
    // -------------------------------------------------------------------------
    if ( !host->test_capacity(vcpu, vmem) )
    {
        error = "Not enough capacity.";
        return false;
    }

    n_fits++;

    // -------------------------------------------------------------------------
    // Evaluate the SCHED_REQUIREMENTS expression against the host attributes
    // -------------------------------------------------------------------------
    if ( !vm->get_requirements().empty() )
    {
        bool   req_matched;
        char * eval_error;

        if ( host->eval_bool(vm->get_requirements(), req_matched,
                &eval_error) != 0 )
        {
            ostringstream msg;

            n_error++;

            msg << "Error in SCHED_REQUIREMENTS: '" << vm->get_requirements()
                << "', error: " << eval_error;

            vm->log(msg.str());

            error = msg.str();

            free(eval_error);

            return false;
        }

        if ( !req_matched )
        {
            error = "It does not fulfill SCHED_REQUIREMENTS.";
            return false;
        }
    }

    n_matched++;

    return true;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Match system DS's for this VM that:
 *    1. Meet user/policy requirements
 *    2. Have enough capacity to host the VM
 *
 *  @param vm the virtual machine
 *  @param vdisk vm disk requirement
 *  @param ds to evaluate vm assignment
 *  @param n_error number of requirement errors, incremented if needed
 *  @param n_fits number of system ds with capacity that fits the VM requirements
 *  @param n_matched number of system ds that fulfill VM sched_requirements
 *  @param error string describing why the datastore is not valid
 *  @return true for a positive match
 */
static bool match_system_ds(VirtualMachineXML* vm, long long vdisk,
    DatastoreXML * ds, int& n_error, int& n_fits, int &n_matched, string &error)
{
    // -------------------------------------------------------------------------
    // Check datastore capacity for shared systems DS (non-shared will be
    // checked in a per host basis during dispatch)
    // -------------------------------------------------------------------------
    if (ds->is_shared() && ds->is_monitored() && !ds->test_capacity(vdisk))
    {
        error = "Not enough capacity.";
        return false;
    }

    n_fits++;

    // -------------------------------------------------------------------------
    // Evaluate VM requirements
    // -------------------------------------------------------------------------
    if (!vm->get_ds_requirements().empty())
    {
        char * estr;
        bool matched;

        if ( ds->eval_bool(vm->get_ds_requirements(), matched, &estr) != 0 )
        {
            ostringstream oss;

            n_error++;

            // Report the parser error string (estr), not the caller's
            // (still empty) error out-parameter
            oss << "Error in SCHED_DS_REQUIREMENTS: '"
                << vm->get_ds_requirements() << "', error: " << estr;

            vm->log(oss.str());

            error = oss.str();

            free(estr);

            // matched is left undefined by a failed eval_bool; bail out
            // instead of reading it (same pattern as match_host)
            return false;
        }

        if (matched == false)
        {
            error = "It does not fulfill SCHED_DS_REQUIREMENTS.";
            return false;
        }
    }

    n_matched++;

    return true;
}
static void log_match(int vid, const string& msg)
{
ostringstream oss;
oss << "Match-making results for VM " << vid << ":\n\t" << msg << endl;
NebulaLog::log("SCHED", Log::DEBUG, oss);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
@ -462,13 +655,11 @@ void Scheduler::match_schedule()
int vm_cpu;
long long vm_disk;
int oid;
int uid;
int gid;
int n_resources;
int n_matched;
int n_auth;
int n_error;
int n_fits;
string reqs;
string ds_reqs;
@ -476,207 +667,67 @@ void Scheduler::match_schedule()
HostXML * host;
DatastoreXML *ds;
char * error;
bool matched;
int rc;
string m_error;
map<int, ObjectXML*>::const_iterator vm_it;
map<int, ObjectXML*>::const_iterator h_it;
vector<SchedulerPolicy *>::iterator it;
const map<int, ObjectXML*> pending_vms = vmpool->get_objects();
const map<int, ObjectXML*> hosts = hpool->get_objects();
const map<int, ObjectXML*> datastores = dspool->get_objects();
const map<int, ObjectXML*> pending_vms = vmpool->get_objects();
const map<int, ObjectXML*> hosts = hpool->get_objects();
const map<int, ObjectXML*> datastores = dspool->get_objects();
for (vm_it=pending_vms.begin(); vm_it != pending_vms.end(); vm_it++)
{
vm = static_cast<VirtualMachineXML*>(vm_it->second);
reqs = vm->get_requirements();
oid = vm->get_oid();
uid = vm->get_uid();
gid = vm->get_gid();
vm->get_requirements(vm_cpu,vm_memory,vm_disk);
n_resources = 0;
n_resources = 0;
n_fits = 0;
n_matched = 0;
n_auth = 0;
n_error = 0;
//--------------------------------------------------------------
//----------------------------------------------------------------------
// Test Image Datastore capacity, but not for migrations
//--------------------------------------------------------------
//----------------------------------------------------------------------
if (!vm->is_resched())
{
if (vm->test_image_datastore_capacity(img_dspool) == false)
{
if (vm->is_public_cloud())
if (vm->is_public_cloud()) //No capacity needed for public cloud
{
// Image DS do not have capacity, but if the VM ends
// in a public cloud host, image copies will not
// be performed.
vm->set_only_public_cloud();
}
else
{
log_match(vm->get_oid(), "Cannot schedule VM, image datastore "
"does not have enough capacity.");
continue;
}
}
}
// ---------------------------------------------------------------------
// Match hosts for this VM that:
// 1. Fulfills ACL
// 2. Meets user/policy requirements
// 3. Have enough capacity to host the VM
// Match hosts for this VM.
// ---------------------------------------------------------------------
for (h_it=hosts.begin(), matched=false; h_it != hosts.end(); h_it++)
for (h_it=hosts.begin(); h_it != hosts.end(); h_it++)
{
host = static_cast<HostXML *>(h_it->second);
// -----------------------------------------------------------------
// Check if user is authorized
// -----------------------------------------------------------------
matched = false;
if ( uid == 0 || gid == 0 )
{
matched = true;
}
else
{
PoolObjectAuth host_perms;
host_perms.oid = host->get_hid();
host_perms.cid = host->get_cid();
host_perms.obj_type = PoolObjectSQL::HOST;
// Even if the owner is in several groups, this request only
// uses the VM group ID
set<int> gids;
gids.insert(gid);
matched = acls->authorize(uid,
gids,
host_perms,
AuthRequest::MANAGE);
}
if ( matched == false )
{
ostringstream oss;
oss << "VM " << oid << ": Host " << host->get_hid()
<< " filtered out. User is not authorized to "
<< AuthRequest::operation_to_str(AuthRequest::MANAGE)
<< " it.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
n_auth++;
// -----------------------------------------------------------------
// Check that VM can be deployed in local hosts
// -----------------------------------------------------------------
if (vm->is_only_public_cloud() && !host->is_public_cloud())
{
ostringstream oss;
oss << "VM " << oid << ": Host " << host->get_hid()
<< " filtered out. VM can only be deployed in a Public Cloud Host, but this one is local.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
// -----------------------------------------------------------------
// Filter current Hosts for resched VMs
// -----------------------------------------------------------------
if (vm->is_resched() && vm->get_hid() == host->get_hid())
{
ostringstream oss;
oss << "VM " << oid << ": Host " << host->get_hid()
<< " filtered out. VM cannot be migrated to its current Host.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
// -----------------------------------------------------------------
// Evaluate VM requirements
// -----------------------------------------------------------------
if (!reqs.empty())
{
rc = host->eval_bool(reqs,matched,&error);
if ( rc != 0 )
{
ostringstream oss;
ostringstream error_msg;
matched = false;
n_error++;
error_msg << "Error in SCHED_REQUIREMENTS: '" << reqs
<< "', error: " << error;
oss << "VM " << oid << ": " << error_msg.str();
NebulaLog::log("SCHED",Log::ERROR,oss);
vm->log(error_msg.str());
free(error);
break;
}
}
else
{
matched = true;
}
if ( matched == false )
{
ostringstream oss;
oss << "VM " << oid << ": Host " << host->get_hid() <<
" filtered out. It does not fulfill SCHED_REQUIREMENTS.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
n_matched++;
// -----------------------------------------------------------------
// Check host capacity
// -----------------------------------------------------------------
if (host->test_capacity(vm_cpu,vm_memory) == true)
if (match_host(acls, vm, vm_memory, vm_cpu, host, n_auth, n_error,
n_fits, n_matched, m_error))
{
vm->add_match_host(host->get_hid());
n_resources++;
n_resources++;
}
else
else if ( n_error > 0 )
{
ostringstream oss;
oss << "VM " << oid << ": Host " << host->get_hid()
<< " filtered out. Not enough capacity.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
break;
}
}
@ -705,7 +756,7 @@ void Scheduler::match_schedule()
vm->log(oss.str());
}
else
else if (n_fits == 0)
{
vm->log("No host with enough capacity to deploy the VM");
}
@ -713,6 +764,8 @@ void Scheduler::match_schedule()
vmpool->update(vm);
log_match(vm->get_oid(), "Cannot schedule VM, there is no suitable host.");
continue;
}
@ -726,109 +779,38 @@ void Scheduler::match_schedule()
}
vm->sort_match_hosts();
if (vm->is_resched())
if (vm->is_resched())//Will use same system DS for migrations
{
// Do not schedule storage for migrations, the VMs needs to be
// deployed in the same system DS
vm->add_match_datastore(vm->get_dsid());
continue;
}
// ---------------------------------------------------------------------
// Match datastores for this VM that:
// 2. Meets requirements
// 3. Have enough capacity to host the VM
// Match datastores for this VM
// ---------------------------------------------------------------------
ds_reqs = vm->get_ds_requirements();
n_resources = 0;
n_resources = 0;
n_matched = 0;
n_error = 0;
n_fits = 0;
for (h_it=datastores.begin(), matched=false; h_it != datastores.end(); h_it++)
for (h_it=datastores.begin(); h_it != datastores.end(); h_it++)
{
ds = static_cast<DatastoreXML *>(h_it->second);
// -----------------------------------------------------------------
// Evaluate VM requirements
// -----------------------------------------------------------------
if (!ds_reqs.empty())
if (match_system_ds(vm, vm_disk, ds, n_error, n_fits, n_matched, m_error))
{
rc = ds->eval_bool(ds_reqs, matched, &error);
if ( rc != 0 )
{
ostringstream oss;
ostringstream error_msg;
matched = false;
n_error++;
error_msg << "Error in SCHED_DS_REQUIREMENTS: '" << ds_reqs
<< "', error: " << error;
oss << "VM " << oid << ": " << error_msg.str();
NebulaLog::log("SCHED",Log::ERROR,oss);
vm->log(error_msg.str());
free(error);
break;
}
}
else
{
matched = true;
}
if ( matched == false )
{
ostringstream oss;
oss << "VM " << oid << ": Datastore " << ds->get_oid() <<
" filtered out. It does not fulfill SCHED_DS_REQUIREMENTS.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
continue;
}
n_matched++;
// -----------------------------------------------------------------
// Check datastore capacity
// -----------------------------------------------------------------
if (ds->is_shared() && ds->is_monitored())
{
if (ds->test_capacity(vm_disk))
{
vm->add_match_datastore(ds->get_oid());
n_resources++;
}
else
{
ostringstream oss;
oss << "VM " << oid << ": Datastore " << ds->get_oid()
<< " filtered out. Not enough capacity.";
NebulaLog::log("SCHED",Log::DEBUG,oss);
}
}
else
{
// All non shared system DS are valid candidates, the
// capacity will be checked later for each host
vm->add_match_datastore(ds->get_oid());
n_resources++;
}
else if (n_error > 0)
{
log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
break;
}
}
// ---------------------------------------------------------------------
@ -837,18 +819,15 @@ void Scheduler::match_schedule()
if (n_resources == 0)
{
// For a public cloud VM, 0 system DS is not a problem
if (vm->is_public_cloud())
if (vm->is_public_cloud())//Public clouds don't need a system DS
{
vm->set_only_public_cloud();
continue;
}
else
else//No datastores assigned, let's see why
{
//No datastores assigned, let's see why
if (n_error == 0) //No syntax error
if (n_error == 0)//No syntax error
{
if (datastores.size() == 0)
{
@ -863,7 +842,7 @@ void Scheduler::match_schedule()
vm->log(oss.str());
}
else
else if (n_fits == 0)
{
vm->log("No system datastore with enough capacity for the VM");
}
@ -873,6 +852,9 @@ void Scheduler::match_schedule()
vmpool->update(vm);
log_match(vm->get_oid(), "Cannot schedule VM, there is no suitable "
"system ds.");
continue;
}
}
@ -888,6 +870,22 @@ void Scheduler::match_schedule()
vm->sort_match_datastores();
}
#ifdef SCHEDDEBUG
ostringstream oss;
oss << "Scheduling Results:" << endl;
for (map<int, ObjectXML*>::const_iterator vm_it=pending_vms.begin();
vm_it != pending_vms.end(); vm_it++)
{
vm = static_cast<VirtualMachineXML*>(vm_it->second);
oss << *vm;
}
NebulaLog::log("SCHED", Log::DEBUG, oss);
#endif
}
/* -------------------------------------------------------------------------- */
@ -915,22 +913,6 @@ void Scheduler::dispatch()
const map<int, ObjectXML*> pending_vms = vmpool->get_objects();
//--------------------------------------------------------------------------
// Print the VMs to schedule and the selected hosts for each one
//--------------------------------------------------------------------------
oss << "Scheduling Results:" << endl;
for (map<int, ObjectXML*>::const_iterator vm_it=pending_vms.begin();
vm_it != pending_vms.end(); vm_it++)
{
vm = static_cast<VirtualMachineXML*>(vm_it->second);
oss << *vm;
}
NebulaLog::log("SCHED", Log::INFO, oss);
//--------------------------------------------------------------------------
// Dispatch each VM till we reach the dispatch limit
//--------------------------------------------------------------------------