
Feature #4464: Refresh VM cluster requirements

Cluster requirements are recalculated:

- on release from hold
- on resume from undeployed/stopped
- on resched
- on migrate
Carlos Martín 2016-08-30 18:32:29 +02:00
parent 2f70d56268
commit b9588846ef
8 changed files with 285 additions and 27 deletions
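
The snippet below is a condensed sketch of the pattern this commit adds to the DispatchManager actions (release, resume, resched) and to the migrate request. It is distilled from the diffs that follow, not code taken verbatim from the commit, and it assumes the usual core context (a locked VirtualMachine and access to the VM pool).

// Recalculate the automatic placement requirements before handing the VM
// back to the scheduler. On failure the action is aborted with -2 and the
// VM keeps its current state.
int rc = vm->automatic_requirements(error_str);

if (rc != 0)
{
    vmpool->update(vm);    // persist any attributes already rewritten
    // log the failure and return -2, leaving the VM in its current state
    return -2;
}

vm->set_state(VirtualMachine::PENDING);    // or, for resched, set the flag
vmpool->update(vm);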

View File

@@ -230,6 +230,23 @@ public:
*/
static void cluster_acl_filter(ostringstream& filter,
PoolObjectSQL::ObjectType auth_object, const vector<int>& cids);
/**
* Returns the Clusters a Datastore belongs to, performing a DB query
* @param oid Datastore ID
* @param cluster_ids Will contain the Cluster IDs
* @return 0 on success
*/
int query_datastore_clusters(int oid, set<int> &cluster_ids);
/**
* Returns the Clusters a VNet belongs to, performing a DB query
* @param oid VNet ID
* @param cluster_ids Will contain the Cluster IDs
* @return 0 on success
*/
int query_vnet_clusters(int oid, set<int> &cluster_ids);
private:
/**
* VNC configuration for clusters
@@ -244,6 +261,11 @@ private:
{
return new Cluster(-1,"",0, &vnc_conf);
};
int get_clusters_cb(
void * _cluster_ids, int num, char **values, char **names);
};
#endif /*CLUSTER_POOL_H_*/
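
A minimal usage sketch for the two queries declared above, assuming a core component that already holds a ClusterPool pointer (clpool) and valid object IDs; the variable names are illustrative only.

set<int> ds_clusters;
set<int> net_clusters;

// Clusters the Datastore belongs to (both relations are many-to-many,
// so the result may contain several IDs)
if (clpool->query_datastore_clusters(ds_id, ds_clusters) != 0)
{
    // DB error, ds_clusters is left untouched
}

// Clusters the Virtual Network belongs to
if (clpool->query_vnet_clusters(vnet_id, net_clusters) != 0)
{
    // DB error
}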

View File

@@ -1213,10 +1213,19 @@ public:
* on the Datastores and VirtualNetworks requested
*
* @param cluster_ids set of Cluster IDs
* @param refresh update the Cluster IDs from the source DS or VNet
* @param error_str Returns the error reason, if any
* @return 0 on success
*/
int get_cluster_requirements(set<int>& cluster_ids, string& error_str);
int get_cluster_requirements(set<int>& cluster_ids, bool refresh, string& error_str);
/**
* Adds automatic placement requirements: Datastore and Cluster
*
* @param error_str Returns the error reason, if any
* @return 0 on success
*/
int automatic_requirements(string& error_str);
/**
* Checks if the resize parameters are valid
@@ -2037,14 +2046,6 @@ private:
*/
int parse_requirements(string& error_str);
/**
* Adds automatic placement requirements: Datastore and Cluster
*
* @param error_str Returns the error reason, if any
* @return 0 on success
*/
int automatic_requirements(string& error_str);
/**
* Parse the "GRAPHICS" attribute and generates a default PORT if not
* defined
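
A sketch of how the reworked interface above is meant to be used, assuming a locked VirtualMachine object; only the two method names and the refresh flag come from this header, the rest is illustrative.

set<int> cluster_ids;
string   error;

// refresh == true: re-query the clusters of every Datastore and Virtual
// Network the VM references and rewrite the cached *_CLUSTER_ID values
// before intersecting them; refresh == false: trust the stored values.
if (vm->get_cluster_requirements(cluster_ids, true, error) != 0)
{
    // incompatible clusters; error names the offending DISK/NIC/PCI
}

// automatic_requirements() is now public, so DispatchManager and the
// RequestManager can rebuild the AUTOMATIC_REQUIREMENTS attributes on
// release, resume, resched and migrate.
if (vm->automatic_requirements(error) != 0)
{
    // same failure semantics as above
}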

View File

@@ -210,3 +210,68 @@ void ClusterPool::cluster_acl_filter(ostringstream& filter,
filter << fc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ClusterPool::query_datastore_clusters(int oid, set<int> &cluster_ids)
{
ostringstream oss;
set_callback(static_cast<Callbackable::Callback>(&ClusterPool::get_clusters_cb),
static_cast<void *>(&cluster_ids));
oss << "SELECT cid FROM " << Cluster::datastore_table << " WHERE oid = " << oid;
int rc = db->exec(oss, this);
unset_callback();
if ( rc != 0 )
{
return -1;
}
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ClusterPool::query_vnet_clusters(int oid, set<int> &cluster_ids)
{
ostringstream oss;
set_callback(static_cast<Callbackable::Callback>(&ClusterPool::get_clusters_cb),
static_cast<void *>(&cluster_ids));
oss << "SELECT cid FROM " << Cluster::network_table << " WHERE oid = " << oid;
int rc = db->exec(oss, this);
unset_callback();
if ( rc != 0 )
{
return -1;
}
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ClusterPool::get_clusters_cb(
void * _cluster_ids, int num, char **values, char **names)
{
if ( num == 0 || values == 0 || values[0] == 0 )
{
return -1;
}
int cluster_id = atoi(values[0]);
static_cast<set<int>*>(_cluster_ids)->insert(cluster_id);
return 0;
}
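
For illustration only (not part of the commit): if a Datastore belongs to clusters 100 and 101, the relation-table query returns two rows, get_clusters_cb runs once per row, and cluster_ids ends up as {100, 101}; one_util::join(cluster_ids, ',') later renders this as "100,101" for the cached *_CLUSTER_ID attributes used below.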

View File

@@ -522,20 +522,40 @@ int DispatchManager::release(
if (vm->get_state() == VirtualMachine::HOLD)
{
int rc = vm->automatic_requirements(error_str);
if (rc != 0)
{
vmpool->update(vm);
goto error_requirements;
}
vm->set_state(VirtualMachine::PENDING);
vmpool->update(vm);
}
else
{
goto error;
goto error_state;
}
vm->unlock();
return 0;
error:
error_requirements:
oss.str("");
oss << "Could not release VM " << vid
<< ", error updating requirements. " << error_str;
NebulaLog::log("DiM",Log::ERROR,oss);
error_str = oss.str();
vm->unlock();
return -2;
error_state:
oss.str("");
oss << "Could not release VM " << vid
<< ", wrong state " << vm->state_str() << ".";
@@ -666,6 +686,15 @@ int DispatchManager::resume(
if (vm->get_state() == VirtualMachine::STOPPED ||
vm->get_state() == VirtualMachine::UNDEPLOYED )
{
int rc = vm->automatic_requirements(error_str);
if (rc != 0)
{
vmpool->update(vm);
goto error_requirements;
}
vm->set_state(VirtualMachine::PENDING);
vmpool->update(vm);
@@ -682,14 +711,25 @@ int DispatchManager::resume(
}
else
{
goto error;
goto error_state;
}
vm->unlock();
return 0;
error:
error_requirements:
oss.str("");
oss << "Could not resume VM " << vid
<< ", error updating requirements. " << error_str;
NebulaLog::log("DiM",Log::ERROR,oss);
error_str = oss.str();
vm->unlock();
return -2;
error_state:
oss.str("");
oss << "Could not resume VM " << vid
<< ", wrong state " << vm->state_str() << ".";
@@ -787,19 +827,42 @@ int DispatchManager::resched(
(vm->get_lcm_state() == VirtualMachine::RUNNING ||
vm->get_lcm_state() == VirtualMachine::UNKNOWN))
{
if (do_resched)
{
int rc = vm->automatic_requirements(error_str);
if (rc != 0)
{
vmpool->update(vm);
goto error_requirements;
}
}
vm->set_resched(do_resched);
vmpool->update(vm);
}
else
{
goto error;
goto error_state;
}
vm->unlock();
return 0;
error:
error_requirements:
oss.str("");
oss << "Could not set rescheduling flag for VM " << vid
<< ", error updating requirements. " << error_str;
NebulaLog::log("DiM",Log::ERROR,oss);
error_str = oss.str();
vm->unlock();
return -2;
error_state:
oss.str("");
oss << "Could not set rescheduling flag for VM " << vid
<< ", wrong state " << vm->state_str() << ".";
@@ -1028,6 +1091,8 @@ int DispatchManager::delete_recreate(VirtualMachine * vm, string& error)
vmpool->update_history(vm);
}
// Automatic requirements are not recalculated on purpose
vm->set_state(VirtualMachine::LCM_INIT);
vm->set_state(VirtualMachine::PENDING);

View File

@@ -304,6 +304,8 @@ void DispatchManager::resubmit_action(int vid)
if (vm->get_lcm_state() == VirtualMachine::CLEANUP_RESUBMIT)
{
// Automatic requirements are not recalculated on purpose
vm->set_state(VirtualMachine::LCM_INIT);
vm->set_state(VirtualMachine::PENDING);

View File

@@ -2188,6 +2188,8 @@ void LifeCycleManager::disk_lock_success(int vid)
}
else
{
// Automatic requirements are not recalculated on purpose
vm->set_state(VirtualMachine::PENDING);
}
}

View File

@@ -979,6 +979,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
DispatchManager * dm = nd.get_dm();
DatastorePool * dspool = nd.get_dspool();
VirtualMachinePool * vmpool = nd.get_vmpool();
VirtualMachine * vm;
string hostname;
@@ -995,7 +996,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
bool c_is_public_cloud;
set<int> cluster_ids;
string tmp_str;
string error_str;
bool auth = false;
bool ds_migr;
@@ -1156,7 +1157,20 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
return;
}
vm->get_cluster_requirements(cluster_ids, tmp_str);
int rc = vm->automatic_requirements(error_str);
vmpool->update(vm);
if (rc != 0)
{
vm->unlock();
att.resp_msg = error_str;
failure_response(ACTION, att);
return;
}
vm->get_cluster_requirements(cluster_ids, false, error_str);
vm->unlock();
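
The hunk above is restated here with explanatory comments, assuming the request context shown (locked vm, vmpool, att): the refresh is done once by automatic_requirements(), so the subsequent lookup can pass refresh == false and reuse the freshly cached cluster IDs.

int rc = vm->automatic_requirements(error_str);   // refreshes and rebuilds requirements

vmpool->update(vm);                               // persist the refreshed attributes

if (rc != 0)
{
    vm->unlock();
    att.resp_msg = error_str;                     // fail the migrate request
    failure_response(ACTION, att);
    return;
}

vm->get_cluster_requirements(cluster_ids, false, error_str);  // no second DB round-trip

vm->unlock();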

View File

@@ -1475,20 +1475,84 @@ static int check_and_set_cluster_id(
/* ------------------------------------------------------------------------ */
int VirtualMachine::get_cluster_requirements(set<int>& cluster_ids, string& error_str)
void update_os_file(VectorAttribute * os,
const string& base_name)
{
ClusterPool * clpool = Nebula::instance().get_clpool();
int ds_id;
set<int> cluster_ids;
string base_name_ds_id = base_name + "_DS_DSID";
string base_name_cluster = base_name + "_DS_CLUSTER_ID";
if (os->vector_value(base_name_ds_id, ds_id) != 0)
{
return;
}
clpool->query_datastore_clusters(ds_id, cluster_ids);
os->replace(base_name_cluster, one_util::join(cluster_ids, ','));
}
/* ------------------------------------------------------------------------ */
void update_disk_cluster_id(VectorAttribute* disk)
{
ClusterPool * clpool = Nebula::instance().get_clpool();
int ds_id;
set<int> cluster_ids;
if (disk->vector_value("DATASTORE_ID", ds_id) != 0)
{
return;
}
clpool->query_datastore_clusters(ds_id, cluster_ids);
disk->replace("CLUSTER_ID", one_util::join(cluster_ids, ','));
}
/* ------------------------------------------------------------------------ */
void update_nic_cluster_id(VectorAttribute* nic)
{
ClusterPool * clpool = Nebula::instance().get_clpool();
int vn_id;
set<int> cluster_ids;
if (nic->vector_value("NETWORK_ID", vn_id) != 0)
{
return;
}
clpool->query_vnet_clusters(vn_id, cluster_ids);
nic->replace("CLUSTER_ID", one_util::join(cluster_ids, ','));
}
/* ------------------------------------------------------------------------ */
int VirtualMachine::get_cluster_requirements(
set<int>& cluster_ids, bool refresh, string& error_str)
{
ostringstream oss;
int num_vatts;
vector<const VectorAttribute *> vatts;
vector<VectorAttribute*> vatts;
int incomp_id;
int rc;
// Get cluster id from the KERNEL and INITRD (FILE Datastores)
const VectorAttribute * osatt = obj_template->get("OS");
VectorAttribute * osatt = obj_template->get("OS");
if ( osatt != 0 )
{
if (refresh)
{
update_os_file(osatt, "KERNEL");
}
rc = check_and_set_cluster_id("KERNEL_DS_CLUSTER_ID", osatt, cluster_ids);
if ( rc != 0 )
@@ -1496,6 +1560,11 @@ int VirtualMachine::get_cluster_requirements(set<int>& cluster_ids, string& erro
goto error_kernel;
}
if (refresh)
{
update_os_file(osatt, "INITRD");
}
rc = check_and_set_cluster_id("INITRD_DS_CLUSTER_ID", osatt, cluster_ids);
if ( rc != 0 )
@@ -1509,6 +1578,11 @@ int VirtualMachine::get_cluster_requirements(set<int>& cluster_ids, string& erro
for(int i=0; i<num_vatts; i++)
{
if (refresh)
{
update_disk_cluster_id(vatts[i]);
}
rc = check_and_set_cluster_id("CLUSTER_ID", vatts[i], cluster_ids);
if ( rc != 0 )
@@ -1525,6 +1599,11 @@ int VirtualMachine::get_cluster_requirements(set<int>& cluster_ids, string& erro
for(int i=0; i<num_vatts; i++)
{
if (refresh)
{
update_nic_cluster_id(vatts[i]);
}
rc = check_and_set_cluster_id("CLUSTER_ID", vatts[i], cluster_ids);
if ( rc != 0 )
@@ -1546,6 +1625,11 @@ int VirtualMachine::get_cluster_requirements(set<int>& cluster_ids, string& erro
continue;
}
if (refresh)
{
update_nic_cluster_id(vatts[i]);
}
rc = check_and_set_cluster_id("CLUSTER_ID", vatts[i], cluster_ids);
if ( rc != 0 )
@@ -1561,8 +1645,8 @@ error_disk:
if (rc == -1)
{
oss << "Incompatible clusters in DISK. Datastore for DISK "<< incomp_id
<< " is not the same as the one used by other VM elements (cluster "
<< one_util::join(cluster_ids, ',') << ")";
<< " is not in the same cluster as the one used by other VM elements "
<< "(cluster " << one_util::join(cluster_ids, ',') << ")";
}
else
{
@@ -1602,8 +1686,8 @@ error_nic:
if (rc == -1)
{
oss << "Incompatible clusters in NIC. Network for NIC "<< incomp_id
<< " is not the same as the one used by other VM elements (cluster "
<< one_util::join(cluster_ids, ',') << ")";
<< " is not in the same cluster as the one used by other VM elements "
<< "(cluster " << one_util::join(cluster_ids, ',') << ")";
}
else
{
@@ -1617,8 +1701,8 @@ error_pci:
if (rc == -1)
{
oss << "Incompatible clusters in PCI (TYPE=NIC). Network for PCI "<< incomp_id
<< " is not the same as the one used by other VM elements (cluster "
<< one_util::join(cluster_ids, ',') << ")";
<< " is not in the same cluster as the one used by other VM elements "
<< "(cluster " << one_util::join(cluster_ids, ',') << ")";
}
else
{
@@ -1643,7 +1727,10 @@ int VirtualMachine::automatic_requirements(string& error_str)
set<int> cluster_ids;
set<string> clouds;
int rc = get_cluster_requirements(cluster_ids, error_str);
obj_template->erase("AUTOMATIC_REQUIREMENTS");
obj_template->erase("AUTOMATIC_DS_REQUIREMENTS");
int rc = get_cluster_requirements(cluster_ids, true, error_str);
if (rc != 0)
{