diff --git a/src/scheduler/include/DatastoreXML.h b/src/scheduler/include/DatastoreXML.h
index 904d6054b1..2b0d252ecb 100644
--- a/src/scheduler/include/DatastoreXML.h
+++ b/src/scheduler/include/DatastoreXML.h
@@ -65,6 +65,15 @@ public:
         return cluster_id;
     };
 
+    /**
+     * Returns true if the DS contains the SHARED = YES attribute
+     *   @return true if the DS is shared
+     */
+    bool is_shared()
+    {
+        return shared;
+    };
+
 private:
 
     int oid;
@@ -72,6 +81,8 @@ private:
 
     long long free_mb; /**< Free disk for VMs (in MB). */
 
+    bool shared;
+
     static const char *ds_paths[]; /**< paths for search function */
 
     static int ds_num_paths; /**< number of paths*/
diff --git a/src/scheduler/include/HostXML.h b/src/scheduler/include/HostXML.h
index 2beeed0998..f5191607e9 100644
--- a/src/scheduler/include/HostXML.h
+++ b/src/scheduler/include/HostXML.h
@@ -18,6 +18,7 @@
 #ifndef HOST_XML_H_
 #define HOST_XML_H_
 
+#include <map>
 #include "ObjectXML.h"
 
 using namespace std;
@@ -87,6 +88,22 @@ public:
         running_vms--;
     };
 
+    /**
+     * Tests whether a new VM can be hosted by the local system DS or not
+     *   @param dsid DS id
+     *   @param vm_disk_mb System disk needed by the VM (in MB)
+     *   @return true if the share can host the VM
+     */
+    bool test_ds_capacity(int dsid, long long vm_disk_mb);
+
+    /**
+     * Adds a new VM to the given local system DS share by updating the free
+     * disk counter
+     *   @param dsid DS id
+     *   @param vm_disk_mb System disk needed by the VM (in MB)
+     */
+    void add_ds_capacity(int dsid, long long vm_disk_mb);
+
     /**
      * Search the Object for a given attribute in a set of object specific
      * routes. Overwrite ObjectXML function to deal with pseudo-attributes
@@ -110,14 +127,17 @@ private:
     int cluster_id;
 
     // Host share values
-    long long disk_usage;  /**< Disk allocated to VMs (in Mb).       */
+    long long disk_usage;  /**< Disk allocated to VMs (in MB).       */
     long long mem_usage;   /**< Memory allocated to VMs (in KB)      */
     long long cpu_usage;   /**< CPU allocated to VMs (in percentage) */
 
-    long long max_disk;    /**< Total disk capacity (in Mb)          */
+    long long max_disk;    /**< Total disk capacity (in MB)          */
     long long max_mem;     /**< Total memory capacity (in KB)        */
     long long max_cpu;     /**< Total cpu capacity (in percentage)   */
 
+    map<int, long long> ds_free_disk; /**< Free MB for local system DS */
+    long long ds_location_free_mb; // TODO: merge with host share max_disk?
+
     long long running_vms; /**< Number of running VMs in this Host   */
 
     static float hypervisor_mem; /**< Fraction of memory for the VMs */
diff --git a/src/scheduler/src/pool/DatastoreXML.cc b/src/scheduler/src/pool/DatastoreXML.cc
index 11f40afc1b..ec7781e425 100644
--- a/src/scheduler/src/pool/DatastoreXML.cc
+++ b/src/scheduler/src/pool/DatastoreXML.cc
@@ -17,6 +17,7 @@
 #include
 
 #include "DatastoreXML.h"
+#include "NebulaUtil.h"
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
@@ -37,6 +38,11 @@ void DatastoreXML::init_attributes()
     cluster_id = atoi(((*this)["/DATASTORE/CLUSTER_ID"] )[0].c_str() );
 
     free_mb = atoll(((*this)["/DATASTORE/FREE_MB"])[0].c_str());
 
+    string shared_st;
+    this->xpath(shared_st, "/DATASTORE/TEMPLATE/SHARED", "YES");
+
+    shared = one_util::toupper(shared_st) == "YES";
+
     ObjectXML::paths     = ds_paths;
     ObjectXML::num_paths = ds_num_paths;
 }
diff --git a/src/scheduler/src/pool/HostXML.cc b/src/scheduler/src/pool/HostXML.cc
index 28caa2a919..0f7dc1d346 100644
--- a/src/scheduler/src/pool/HostXML.cc
+++ b/src/scheduler/src/pool/HostXML.cc
@@ -53,6 +53,22 @@ void HostXML::init_attributes()
     //Reserve memory for the hypervisor
     max_mem = static_cast<long long>(hypervisor_mem * static_cast<float>(max_mem));
 
+    ds_location_free_mb = atoll(((*this)["/HOST/TEMPLATE/DS_LOCATION_FREE_MB"])[0].c_str());
+
+    vector<string> ds_ids     = (*this)["/HOST/TEMPLATE/DS/ID"];
+    vector<string> ds_free_mb = (*this)["/HOST/TEMPLATE/DS/FREE_MB"];
+
+    int       id;
+    long long disk;
+
+    for (size_t i = 0; i < ds_ids.size() && i < ds_free_mb.size(); i++)
+    {
+        id   = atoi(ds_ids[i].c_str());
+        disk = atoll(ds_free_mb[i].c_str());
+
+        ds_free_disk[id] = disk;
+    }
+
     //Init search xpath routes
 
     ObjectXML::paths     = host_paths;
@@ -99,3 +115,29 @@ int HostXML::search(const char *name, int& value)
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
+
+bool HostXML::test_ds_capacity(int dsid, long long vm_disk_mb)
+{
+    if (ds_free_disk.count(dsid) == 0)
+    {
+        ds_free_disk[dsid] = ds_location_free_mb;
+    }
+
+    return (vm_disk_mb < ds_free_disk[dsid]);
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+void HostXML::add_ds_capacity(int dsid, long long vm_disk_mb)
+{
+    if (ds_free_disk.count(dsid) == 0)
+    {
+        ds_free_disk[dsid] = ds_location_free_mb;
+    }
+
+    ds_free_disk[dsid] -= vm_disk_mb;
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
diff --git a/src/scheduler/src/sched/Scheduler.cc b/src/scheduler/src/sched/Scheduler.cc
index 9b48af04fc..12593686d2 100644
--- a/src/scheduler/src/sched/Scheduler.cc
+++ b/src/scheduler/src/sched/Scheduler.cc
@@ -705,21 +705,31 @@ void Scheduler::match_schedule()
            // -----------------------------------------------------------------
            // Check datastore capacity
            // -----------------------------------------------------------------
-           // TODO: non shared DS
-           if (ds->test_capacity(vm_disk))
+           if (ds->is_shared())
            {
-               vm->add_match_datastore(ds->get_oid());
+               if (ds->test_capacity(vm_disk))
+               {
+                   vm->add_match_datastore(ds->get_oid());
 
-               n_hosts++;
+                   n_hosts++;
+               }
+               else
+               {
+                   ostringstream oss;
+
+                   oss << "VM " << oid << ": Datastore " << ds->get_oid()
+                       << " filtered out. Not enough capacity.";
+
+                   NebulaLog::log("SCHED",Log::DEBUG,oss);
+               }
            }
            else
            {
-               ostringstream oss;
+               // All non shared system DS are valid candidates, the
+               // capacity will be checked later for each host
 
-               oss << "VM " << oid << ": Datastore " << ds->get_oid()
-                   << " filtered out. Not enough capacity.";
-
-               NebulaLog::log("SCHED",Log::DEBUG,oss);
+               vm->add_match_datastore(ds->get_oid());
+               n_hosts++;
            }
        }
@@ -779,6 +789,7 @@ void Scheduler::dispatch()
    int cpu, mem;
    long long dsk;
 
    int hid, dsid, cid;
+   bool test_cap_result;
 
    unsigned int dispatched_vms = 0;
@@ -879,9 +890,33 @@
            //--------------------------------------------------------------
            // Test datastore capacity, but not for migrations
            //--------------------------------------------------------------
-           if (!vm->is_resched() && ds->test_capacity(dsk) != true)
+
+           if (!vm->is_resched())
            {
-               continue;
+               if (ds->is_shared())
+               {
+                   test_cap_result = ds->test_capacity(dsk);
+               }
+               else
+               {
+                   test_cap_result = host->test_ds_capacity(ds->get_oid(), dsk);
+
+                   if (test_cap_result == false)
+                   {
+                       ostringstream oss;
+
+                       oss << "VM " << vm->get_oid() << ": Local Datastore "
+                           << ds->get_oid() << " in Host " << host->get_hid()
+                           << " filtered out. Not enough capacity.";
+
+                       NebulaLog::log("SCHED",Log::DEBUG,oss);
+                   }
+               }
+
+               if (test_cap_result != true)
+               {
+                   continue;
+               }
            }
 
            //--------------------------------------------------------------
@@ -913,17 +948,22 @@
                continue;
            }
 
-           // TODO: non shared DS
-
            // DS capacity is only added for new deployments, not for migrations
            if (!vm->is_resched())
            {
-               ds->add_capacity(dsk);
+               if (ds->is_shared())
+               {
+                   ds->add_capacity(dsk);
+               }
+               else
+               {
+                   host->add_ds_capacity(ds->get_oid(), dsk);
+               }
            }
 
            host->add_capacity(cpu,mem);
 
-           // TODO update img & system DS free space
+           // TODO update img DS free space
 
            host_vms[hid]++;
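
A minimal, self-contained sketch of the per-host accounting pattern this patch introduces for non-shared system datastores: each local DS counter is lazily seeded from the host's DS_LOCATION_FREE_MB, tested before dispatch, and then decremented once the VM is placed. LocalDsAccount, the DS id 100 and the MB figures below are illustrative stand-ins, not code from the patch.

// Illustrative sketch only -- a simplified stand-in for the HostXML logic above.
#include <iostream>
#include <map>

class LocalDsAccount
{
public:
    explicit LocalDsAccount(long long ds_location_free_mb)
        : ds_location_free_mb(ds_location_free_mb) {}

    // Mirrors HostXML::test_ds_capacity: an unseen DS id falls back to the
    // free space reported for the host's datastore location.
    bool test(int dsid, long long vm_disk_mb)
    {
        if (free_mb.count(dsid) == 0)
        {
            free_mb[dsid] = ds_location_free_mb;
        }

        return vm_disk_mb < free_mb[dsid];
    }

    // Mirrors HostXML::add_ds_capacity: reserve the VM system disk by
    // decreasing the remaining free MB of that DS on this host.
    void add(int dsid, long long vm_disk_mb)
    {
        if (free_mb.count(dsid) == 0)
        {
            free_mb[dsid] = ds_location_free_mb;
        }

        free_mb[dsid] -= vm_disk_mb;
    }

private:
    std::map<int, long long> free_mb;   // per local system DS, in MB
    long long ds_location_free_mb;      // host-wide fallback, in MB
};

int main()
{
    LocalDsAccount host(10000);         // 10 GB free under the DS location

    // Two 4 GB VMs fit, a third one is filtered out. The counter is kept
    // per host, so the same DS id may still be chosen on another host.
    for (int vm = 0; vm < 3; vm++)
    {
        if (host.test(100, 4096))
        {
            host.add(100, 4096);
            std::cout << "VM " << vm << " dispatched to DS 100\n";
        }
        else
        {
            std::cout << "VM " << vm << " filtered out, not enough capacity\n";
        }
    }

    return 0;
}

Shared datastores keep using DatastoreXML::test_capacity()/add_capacity() on the pool-wide FREE_MB counter, which is why match_schedule() only filters shared datastores up front and defers the capacity check for local system datastores to dispatch(), where the host is known.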