
F #1550: New Scheduler Framework for OpenNebula & DRS scheduler

This commit introduces a new scheduling framework for OpenNebula, based on
the standard OpenNebula middleware access driver (MAD) model. Benefits
compared to the current implementation:

* Less overhead on the API server and streamlined communications
* Easier integration with third-party schedulers
* New "optimize" action to balance the workload of a cluster
* Embedded Plan Manager to apply optimization and deployment plans with
  custom parameters for the scheduling window
* Custom optimization policies per cluster

This commit introduces two schedulers:

* Rank scheduler based on the match-making algorithm present in 6.x
  series
* OpenNebula DRS, an optimization algorithm based on an ILP solver for
  placement and cluster optimization

New API calls have also been added for onecluster, along with CLI updates
to expose them; a sketch of the new subcommands follows.
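
The subcommand names below are an assumption inferred from the XML-RPC
methods added in this commit (one.cluster.optimize, one.cluster.planexecute
and one.cluster.plandelete), shown only to sketch the new CLI surface:

    $ onecluster optimize 100      # create an optimization plan for cluster 100
    $ onecluster planexecute 100   # start applying the plan
    $ onecluster plandelete 100    # discard the plan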

Co-authored-by: MarioRobres <mrobres@opennebula.io>
Co-authored-by: Pavel Czerny <pczerny@opennebula.io>
Co-authored-by: Mirko Stojiljkovic <mstojiljkovic@opennebula.io>
Co-authored-by: Valentyn Bohdan <vbohdan@opennebula.io>
Ruben S. Montero
2025-02-27 16:33:05 +01:00
parent 74313b9f5a
commit eda55567b1
175 changed files with 13465 additions and 4456 deletions

.gitignore

@ -10,8 +10,6 @@ share/install_gems/Gemfile.lock
share/man/*
!share/man/build.sh
src/im_mad/collectd/collectd
src/scheduler/src/sched/.xmlrpc_test/
src/scheduler/src/sched/mm_sched
src/sunstone/locale/languages/*.js
src/mad/utils/tty_expect
src/nebula/.xmlrpc_test/
@ -22,6 +20,9 @@ src/svncterm_server/genfont
src/svncterm_server/svncterm_server
src/im_mad/remotes/status.db
src/schedm_mad/remotes/rank/src/sched/place
src/schedm_mad/remotes/one_drs/vendor/lib/
*.class
src/oca/java/bin
src/oca/java/jar


@ -111,7 +111,8 @@ main_env.Append(LIBPATH=[
cwd+'/src/ipamm',
cwd+'/src/data_model',
cwd+'/src/protocol',
cwd+'/src/sam'
cwd+'/src/sam',
cwd+'/src/schedm'
])
# Compile flags
@ -291,8 +292,6 @@ else:
main_env.Replace(mysql='yes')
shutil.rmtree('.xmlrpc_test', True)
shutil.rmtree('src/nebula/.xmlrpc_test', True)
shutil.rmtree('src/scheduler/.xmlrpc_test', True)
# libxml2
main_env.ParseConfig('xml2-config --libs --cflags')
@ -323,7 +322,7 @@ build_scripts = [
'src/im/SConstruct',
'src/image/SConstruct',
'src/dm/SConstruct',
'src/scheduler/SConstruct',
'src/schedm_mad/remotes/rank/SConstruct',
'src/vnm/SConstruct',
'src/vn_template/SConstruct',
'src/hm/SConstruct',
@ -347,6 +346,7 @@ build_scripts = [
'src/onedb/SConstruct',
'src/protocol/SConstruct',
'src/sam/SConstruct',
'src/schedm/SConstruct',
svncterm_path,
'share/context/SConstruct'
]


@ -22,7 +22,6 @@
#include "BitMap.h"
#include "ObjectCollection.h"
/**
* The Cluster class.
*/
@ -79,6 +78,34 @@ public:
get_template_attribute("RESERVED_MEM", mem);
}
/**
* Sets an error message in the Cluster template:
* @param mod OpenNebula module to log the error message
* @param nam name of the error message attribute
* @param msg the error message
*/
void set_error_message(const char *mod, const std::string& nam, const std::string& msg);
// *************************************************************************
// Plan and ONE DRS related attributes
// *************************************************************************
/**
* Load Cluster Plan from DB
*/
void load_plan();
/**
* @return true if the plan needs to be auto-applied
*/
bool is_autoapply()
{
bool aaply = false;
obj_template->get("ONEDRS_AUTOAPPLY", aaply);
return aaply;
}
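// Illustrative cluster template snippet (value assumed) enabling automatic
// plan execution for this cluster; standard OpenNebula boolean parsing
// applies to the attribute read above:
//
//   ONEDRS_AUTOAPPLY = "YES"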
// *************************************************************************
// DataBase implementation (Public)
// *************************************************************************
@ -151,6 +178,8 @@ private:
BitMap<65536> vnc_bitmap;
std::string plan_xml;
// *************************************************************************
// DataBase implementation (Private)
// *************************************************************************


@ -21,7 +21,6 @@
#include "PoolSQL.h"
#include "OneDB.h"
class ClusterPool : public PoolSQL
{
public:


@ -72,7 +72,7 @@ public:
enum DatastoreState
{
READY = 0, /** < Datastore ready to use */
DISABLED = 1 /** < System Datastore can not be used */
DISABLED = 1 /** < System Datastore cannot be used */
};
/**
@ -223,6 +223,15 @@ public:
*/
bool get_avail_mb(long long &avail) const;
/**
* @return true if the datastore has been monitored
*/
bool is_monitored()
{
return (free_mb != 0 || total_mb != 0 || used_mb != 0);
}
/**
* Returns true if the DS contains the SHARED = YES attribute
* @return true if the DS is shared
@ -250,6 +259,14 @@ public:
*/
bool is_concurrent_forget() const;
/**
* @return true if the datastore is enabled (only for system ds)
*/
bool is_enabled() const
{
return state == READY;
};
/**
* Enable or disable the DS. Only for System DS.
* @param enable true to enable


@ -42,7 +42,6 @@ public:
* A call to the start() method is needed to start the driver
* @param c the command to execute the driver
* @param a the arguments for the command
* @param th true to execute driver action in a thread
* @param ct max number of concurrent threads
*/
Driver(const std::string& c, const std::string& a, int ct)


@ -348,8 +348,39 @@ public:
*/
void load_monitoring();
/**
* Returns the host monitoring information
*/
const HostMonitoringTemplate& get_monitoring() const
{
return monitoring;
}
void update_zombies(const std::set<int>& ids);
/**
* Search the Object for a given attribute in a set of object specific
* routes.
* @param name of the attribute
* @param value of the attribute
*
* @return -1 if the element was not found
*/
int search(const char *name, std::string& value) override
{
return __search(name, value);
}
int search(const char *name, int& value) override
{
return __search(name, value);
}
int search(const char *name, float& value) override
{
return __search(name, value);
}
private:
friend class HostPool;
@ -401,6 +432,45 @@ private:
void update_wilds();
/* ---------------------------------------------------------------------- */
/* Functions to search for values in the HostXML object */
/* ---------------------------------------------------------------------- */
/**
* Search the Object for a given attribute in a set of object specific
* routes. Overrides the ObjectXML function to deal with pseudo-attributes:
*   - CURRENT_VMS: value is the VM ID to search for in the set of VMs
*     running on the host. If the VM ID is found, value is not modified;
*     otherwise it is set to -1.
*/
template<typename T>
int __search(const char *name, T& value)
{
std::string s_name(name);
if (s_name == "CURRENT_VMS")
{
std::vector<T> results;
xpaths(results, "/HOST/VMS/ID");
for (const auto& vm_id : results)
{
if (vm_id == value)
{
return 0; //VM ID found in VMS, value is left as the VM ID
}
}
value = -1; //VM ID not found in VMS, value is set to -1
return 0;
}
else
{
return ObjectXML::search(name, value);
}
}
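// Usage sketch: this pseudo-attribute lets scheduling expressions refer to
// VM co-location. For example, a (hypothetical) requirement such as
//
//   SCHED_REQUIREMENTS = "CURRENT_VMS != 42"
//
// only matches hosts that are not currently running VM 42.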
// *************************************************************************
// DataBase implementation (Private)
// *************************************************************************


@ -19,7 +19,7 @@
#include "Template.h"
class HostShareCapacity;
struct HostShareCapacity;
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */


@ -1,124 +0,0 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include <curl/curl.h>
#include <string>
/* Class for sending HTTP requests and receiving responses.
* Note: Very simple implementation:
* - no consistency check
* - no checks if the response is in correct format
* - almost no error handling
*/
class HttpRequest
{
public:
HttpRequest();
~HttpRequest();
/** Send a POST request, receive response as JSON
* @param url Address
* @param data Data to send in json format
* @param response In case of success, the full response from the server in json format,
* the caller should extract the data from { json : {...} }.
* Contains error string in case of failure.
* @return 0 on success, -1 otherwise
*/
int post_json(const std::string& url, const std::string& data, std::string& response);
int post_json(const std::string& data, std::string& response)
{
return post_json(_url, data, response);
}
/** Send a GET request, receive response as JSON
* @param url Address
* @param response In case of success, the full response from the server in json format,
* the caller should extract the data from { json : {...} }.
* Contains error string in case of failure.
* @return 0 on success, -1 otherwise
*/
int get_json(const std::string& url, std::string& response);
int get_json(std::string& response)
{
return get_json(_url, response);
}
bool is_initialized() const
{
return !_url.empty();
}
/**
* Set server URL address, in the form "scheme://host:port/path"
*/
void set_server(const std::string& url)
{
_url = url;
}
/**
* Set maximum time in seconds that transfer operation can take.
* 0 -> Use curl default value
* See curl CURLOPT_TIMEOUT for more info
*/
void set_timeout(long timeout)
{
_timeout = timeout;
}
/**
* Set proxy server, including protocol and port. Example "http://example.com:1234"
* See curl CURLOPT_PROXY for more info
*/
void set_proxy(const std::string& proxy)
{
_proxy = proxy;
}
private:
/**
* Callback method for writing response of curl operation to string
* See curl CURLOPT_WRITEFUNCTION for more info
*/
static size_t write_to_string(void *ptr, size_t size, size_t count, void *str);
/**
* Check curl response, set error message if needed
* @param curl Pointer to curl handle
* @param error Contains error message if any
* @return 0 on success, -1 otherwise
*/
static int check_http_code(CURL* curl, std::string& msg);
/**
* Server URL address
*/
std::string _url;
/**
* Maximum time in seconds that transfer operation can take.
* 0 -> Use curl default value
*/
long _timeout = 0;
/**
* Curl proxy server, including the protocol and port.
*/
std::string _proxy;
};


@ -45,8 +45,8 @@
*
*/
template<typename E, //Enum class for the message types
bool encode = false, //Payload is base64 encoded
bool compress = false, //Payload is compressed
bool encode = false, //Payload is base64 encoded
bool encrypt = false, //Payload is encrypted
bool has_timestamp = false> //Message includes timestamp
class Message
@ -236,6 +236,8 @@ int Message<E, compress, encode, encrypt, has_timestamp>
is >> _timestamp >> std::ws;
}
is.clear(); //Clear failbit
getline(is, buffer);
if (buffer.empty())


@ -39,6 +39,7 @@ class HostPool;
class ImagePool;
class MarketPlacePool;
class MarketPlaceAppPool;
class PlanPool;
class ScheduledActionPool;
class SecurityGroupPool;
class VdcPool;
@ -61,11 +62,13 @@ class InformationManager;
class IPAMManager;
class LifeCycleManager;
class MarketPlaceManager;
class PlanManager;
class RaftManager;
class RequestManager;
class ScheduledActionManager;
class TransferManager;
class VirtualMachineManager;
class SchedulerManager;
/**
* This is the main class for the OpenNebula daemon oned. It stores references
@ -194,6 +197,11 @@ public:
return sapool;
}
PlanPool * get_planpool() const
{
return plpool;
}
// --------------------------------------------------------------
// Manager Accessors
// --------------------------------------------------------------
@ -278,6 +286,16 @@ public:
return sam;
}
SchedulerManager * get_sm() const
{
return sm;
}
PlanManager * get_planm() const
{
return planm;
}
// --------------------------------------------------------------
// Environment & Configuration
// --------------------------------------------------------------
@ -636,10 +654,10 @@ public:
, vmpool(0), hpool(0), vnpool(0), upool(0), ipool(0), gpool(0), tpool(0)
, dspool(0), clpool(0), docpool(0), zonepool(0), secgrouppool(0)
, vdcpool(0), vrouterpool(0), marketpool(0), apppool(0), vmgrouppool(0)
, vntpool(0), hkpool(0), bjpool(0), sapool(0)
, vntpool(0), hkpool(0), bjpool(0), sapool(0), plpool(0)
, lcm(0), vmm(0), im(0), tm(0), dm(0), rm(0), hm(0)
, hl(0), authm(0), aclm(0), imagem(0), marketm(0), ipamm(0), raftm(0), frm(0)
, sam(0)
, sam(0), sm(0), planm(0)
{
};
@ -713,6 +731,7 @@ private:
HookPool * hkpool;
BackupJobPool * bjpool;
ScheduledActionPool* sapool;
PlanPool * plpool;
// ---------------------------------------------------------------
// Nebula Managers
@ -734,6 +753,8 @@ private:
RaftManager * raftm;
FedReplicaManager * frm;
ScheduledActionManager *sam;
SchedulerManager * sm;
PlanManager * planm;
// ---------------------------------------------------------------
// Implementation functions


@ -37,12 +37,7 @@ public:
// ---------------------- Constructors ------------------------------------
ObjectXML()
: paths(nullptr)
, num_paths(0)
, xml(nullptr)
, ctx(nullptr)
{}
ObjectXML(): xml(nullptr) , ctx(nullptr) {};
/**
* Constructs an object using a XML document
@ -211,7 +206,7 @@ public:
{
xpaths(results, name);
}
else if (num_paths == 0)
else if (paths.empty())
{
results.clear();
}
@ -221,7 +216,7 @@ public:
xpath << paths[0] << name;
for (int i = 1; i < num_paths ; i++)
for (size_t i = 1; i < paths.size() ; i++)
{
xpath << '|' << paths[i] << name;
}
@ -375,12 +370,7 @@ protected:
/**
* Array of paths to look for attributes in search methods
*/
const char **paths;
/**
* Number of elements in paths array
*/
int num_paths;
std::vector<std::string> paths;
private:
/**


@ -87,6 +87,13 @@ namespace one_db
extern const char * cluster_bitmap_table;
/* ---------------------------------------------------------------------- */
/* Plan tables */
/* ---------------------------------------------------------------------- */
extern const char * plan_table;
extern const char * plan_db_names;
extern const char * plan_db_bootstrap;
/* ---------------------------------------------------------------------- */
/* ACL tables */
/* ---------------------------------------------------------------------- */

include/Plan.h (new file)

@ -0,0 +1,294 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef PLAN_H_
#define PLAN_H_
#include "ObjectSQL.h"
#include "ObjectXML.h"
// State of the plan and also state of individual action
enum PlanState
{
NONE = -1,
READY = 0,
APPLYING = 1,
DONE = 2,
ERROR = 3,
TIMEOUT = 4
};
class PlanAction
{
public:
std::string to_xml() const;
int from_xml_node(const xmlNodePtr node);
PlanState state() const
{
return _state;
}
void state(PlanState state)
{
_state = state;
}
int vm_id() const
{
return _vm_id;
}
int host_id() const
{
return _host_id;
}
int ds_id() const
{
return _ds_id;
}
const std::string& operation() const
{
return _operation;
}
const std::vector<std::pair<int, int>>& nics() const
{
return _nics;
}
time_t timestamp() const
{
return _timestamp;
}
void timestamp(time_t t)
{
_timestamp = t;
}
private:
std::string _operation;
PlanState _state = PlanState::READY;
int _vm_id = -1;
int _host_id = -1;
int _ds_id = -1;
time_t _timestamp = 0;
// Nics in the format <nic_id, network_id>
std::vector<std::pair<int, int>> _nics;
};
/**
* The Plan class. It represents planned VM deployment or optimization
* actions.
*/
class Plan : public ObjectSQL, public ObjectXML
{
public:
Plan() = default;
/**
* Constructor for the Plan.
* @param cid Cluster ID of associated cluster. -1 for global plan
*/
Plan(int cid):_cid(cid) {};
~Plan()
{
if (!ro && _mutex != nullptr)
{
_mutex->unlock();
}
};
/**
* Print the Plan into a string in XML format.
* @return generated string
*/
std::string to_xml() const;
/**
* Rebuilds the object from an xml formatted string
* @param xml_str The xml-formatted string
*
* @return 0 on success, -1 otherwise
*/
int from_xml(const std::string& xml);
int cid() const
{
return _cid;
}
PlanState state() const
{
return _state;
}
void state(PlanState state)
{
_state = state;
}
void clear()
{
_state = PlanState::NONE;
_actions.clear();
}
/**
* Check if plan is completed and set appropriate state:
* DONE, TIMEOUT or ERROR depending on the state of the actions
*/
void check_completed();
// -------------------------------------------------------------------------
// Plan Actions
// -------------------------------------------------------------------------
const std::vector<PlanAction>& actions() const
{
return _actions;
}
PlanAction* get_next_action();
/**
* Mark action as finished, return false if the action is not in the plan
*/
bool action_finished(int vid, PlanState state);
/**
* Set the state of actions to TIMEOUT if they exceed the specified timeout.
*
* @param timeout The timeout value in seconds.
*/
void timeout_actions(int timeout);
/**
* Count the number of actions per host and cluster
*
* @param cluster_actions Number of actions in the cluster
* @param host_actions Map of host_id to number of actions
*/
void count_actions(int &cluster_actions, std::map<int, int>& host_actions);
private:
friend class PlanPool;
/**
* ID of the Plan, same as the associated Cluster ID. -1 is for placement
* plans
*/
int _cid = -1;
/**
* Plan state
*/
PlanState _state = PlanState::NONE;
/**
* List of actions which should be executed
*/
std::vector<PlanAction> _actions;
/**
* The mutex for the PoolObject. This implementation assumes that the mutex
* IS LOCKED when the class destructor is called.
*/
std::mutex * _mutex = nullptr;
/**
* Flag to check if this is a read-only object
*/
bool ro = false;
/**
* Rebuilds the internal attributes using xpath
*/
int rebuild_attributes();
// -------------------------------------------------------------------------
// Database Interface
// -------------------------------------------------------------------------
/**
* Bootstraps the database table(s) associated to the Plan
* @return 0 on success
*/
static int bootstrap(SqlDB * db);
/**
* Callback function to unmarshall a Plan object
* @param num the number of columns read from the DB
* @param names the column names
* @param values the column values
* @return 0 on success
*/
int select_cb(void *nil, int num, char **values, char **names);
/**
* Reads the Plan (identified with its _cid) from the database.
* @param db pointer to the db
* @return 0 on success
*/
int select(SqlDB * db) override;
/**
* Writes the Plan in the database.
* @param db pointer to the db
* @return 0 on success
*/
int insert(SqlDB * db, std::string& error_str) override
{
error_str.clear();
return insert_replace(db, false);
}
/**
* Updates the Plan in the database.
* @param db pointer to the db
* @return 0 on success
*/
int update(SqlDB * db) override
{
return insert_replace(db, true);
}
int insert_replace(SqlDB *db, bool replace);
/**
* Removes the Plan from the database.
* @param db pointer to the db
* @return 0 on success
*/
int drop(SqlDB * db) override;
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
#endif /*PLAN_H_*/
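
A hedged sketch of the XML a Plan could serialize to; the element names are
assumed from the class members above, with share/doc/xsd/plan.xsd (added by
this commit) being the normative schema:

    <PLAN>
      <ID>100</ID>
      <STATE>0</STATE>
      <ACTION>
        <OPERATION>migrate_vm</OPERATION>
        <STATE>0</STATE>
        <VM_ID>42</VM_ID>
        <HOST_ID>7</HOST_ID>
        <DS_ID>-1</DS_ID>
        <TIMESTAMP>0</TIMESTAMP>
      </ACTION>
    </PLAN>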

include/PlanManager.h (new file)

@ -0,0 +1,102 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef PLAN_MANAGER_H_
#define PLAN_MANAGER_H_
#include "Listener.h"
#include "Plan.h"
#include "OneDB.h"
#include <vector>
class ClusterPool;
class PlanPool;
class VirtualMachinePool;
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
class PlanManager
{
public:
PlanManager(time_t timer,
int _max_actions_per_host,
int _max_actions_per_cluster,
int _live_resched,
int _cold_migrate_mode,
int _timeout);
void finalize();
void add_plan(const std::string& xml);
/**
* Start applying plan actions
*
* @param cid Cluster ID
* @return 0 on success, -1 otherwise
*/
int start_plan(int cid, std::string& error);
void action_success(int cid, int vid)
{
action_finished(cid, vid, PlanState::DONE);
}
void action_failure(int cid, int vid)
{
action_finished(cid, vid, PlanState::ERROR);
}
private:
/**
* Timer to periodically check and start the placement actions.
*/
Timer timer_thread;
ClusterPool *cluster_pool;
PlanPool *plan_pool;
VirtualMachinePool *vm_pool;
/**
* Dispatch Options
*/
int max_actions_per_host;
int max_actions_per_cluster;
bool live_resched;
int cold_migrate_mode;
int action_timeout;
/**
* Periodically called method to start the placement actions.
*/
void timer_action();
bool start_action(PlanAction& action);
void action_finished(int cid, int vid, PlanState state);
void execute_plan(Plan& plan);
void execute_plans();
};
#endif /*PLAN_MANAGER_H*/
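
A minimal sketch, not the actual PlanManager logic, of how the dispatch
limits above could gate the next action using only the Plan API declared in
Plan.h (the helper name and structure are assumptions):

    #include <map>

    #include "Plan.h"

    static bool can_dispatch(Plan& plan, const PlanAction& action,
                             int max_actions_per_host, int max_actions_per_cluster)
    {
        int cluster_actions = 0;
        std::map<int, int> host_actions;

        // Count the actions already being applied per cluster and per host
        plan.count_actions(cluster_actions, host_actions);

        return cluster_actions < max_actions_per_cluster &&
               host_actions[action.host_id()] < max_actions_per_host;
    }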

include/PlanPool.h (new file)

@ -0,0 +1,109 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef PLAN_POOL_H_
#define PLAN_POOL_H_
#include "PoolSQLCache.h"
#include "Plan.h"
class SqlDB;
class PlanPool
{
public:
PlanPool(SqlDB * _db);
/**
* Gets an object from the pool (if needed the object is loaded from the
* database). The object is locked, other threads can't access the same
* object. The lock is released by the destructor.
* @param oid the object unique identifier
* @return a pointer to the object, nullptr in case of failure
*/
std::unique_ptr<Plan> get(int id);
/**
* Gets a read only object from the pool (if needed the object is loaded from the
* database). No object lock, other threads may work with the same object.
* @param oid the object unique identifier
* @return a pointer to the object, nullptr in case of failure
*/
std::unique_ptr<Plan> get_ro(int id);
/**
* Bootstraps the database table(s) associated to the Plan pool
* @return 0 on success
*/
static int bootstrap(SqlDB *_db)
{
return Plan::bootstrap(_db);
};
/**
* Updates the plan in the database.
* @param plan pointer to the Plan object
* @return 0 on success
*/
int update(Plan* plan)
{
return plan->update(db);
}
/**
* Drops the object's data in the database. The object mutex SHOULD be
* locked.
* @param objsql a pointer to the object
* @return 0 on success, -1 DB error
*/
int drop(Plan* plan)
{
return plan->drop(db);
}
/**
* Get the IDs of plans ready to be executed
* @return a vector with the active plan IDs
*/
std::vector<int> get_active_plans() const;
private:
/**
* The mutex for the plan. This implementation assumes that the mutex
* IS LOCKED when the class destructor is called.
*/
std::mutex _mutex;
/**
* Pointer to the database.
*/
SqlDB * db;
/**
* Tablename for this pool
*/
std::string table;
/**
* The pool cache is implemented with a Map of SQL object pointers,
* using the OID as key.
*/
PoolSQLCache cache;
};
#endif // PLAN_POOL_H_
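
A hypothetical usage sketch of the locking semantics described above: get()
returns a unique_ptr whose destruction releases the per-plan lock, while
get_ro() returns an unlocked read-only copy:

    if (auto plan = plan_pool->get(cluster_id))   // plan is locked here
    {
        plan->state(PlanState::APPLYING);

        plan_pool->update(plan.get());
    }                                             // lock released on destruction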


@ -110,6 +110,7 @@ public:
case VNTEMPLATE: return "VNTEMPLATE"; break;
case HOOK: return "HOOK"; break;
case BACKUPJOB: return "BACKUPJOB"; break;
case SCHEDULEDACTION:return "SCHEDULEDACTION"; break;
default: return "";
}
};
@ -137,6 +138,7 @@ public:
else if ( type == "VNTEMPLATE" ) return VNTEMPLATE;
else if ( type == "HOOK" ) return HOOK;
else if ( type == "BACKUPJOB" ) return BACKUPJOB;
else if ( type == "SCHEDULEDACTION" )return SCHEDULEDACTION;
else return NONE;
};


@ -183,9 +183,8 @@ enum class VMManagerMessages : unsigned short int
using vm_msg_t = Message<VMManagerMessages, false, false, false, false>;
/**
* Virtual Machine Driver messages
* Hook Manager Driver messages
*/
enum class HookManagerMessages : unsigned short int
{
@ -200,5 +199,20 @@ enum class HookManagerMessages : unsigned short int
using hook_msg_t = Message<HookManagerMessages, false, false, false, false>;
/**
* Scheduler Manager Driver messages
*/
enum class SchedulerManagerMessages : unsigned short int
{
UNDEFINED = 0,
INIT,
FINALIZE,
PLACE,
OPTIMIZE,
LOG,
ENUM_MAX
};
using scheduler_msg_t = Message<SchedulerManagerMessages, false, true, false, false>;
#endif /*PROTOCOL_MESSAGES_H*/


@ -23,6 +23,9 @@
#include "DatastorePool.h"
#include "VirtualNetworkPool.h"
class PlanPool;
class PlanManager;
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
@ -355,8 +358,77 @@ public:
}
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
class RequestManagerPlan: public Request
{
protected:
RequestManagerPlan(const std::string& method_name,
const std::string& help,
const std::string& params)
: Request(method_name, params, help)
{
Nebula& nd = Nebula::instance();
clpool = nd.get_clpool();
plpool = nd.get_planpool();
planm = nd.get_planm();
auth_object = PoolObjectSQL::CLUSTER;
auth_op = AuthRequest::ADMIN;
}
ClusterPool *clpool;
PlanPool *plpool;
PlanManager *planm;
};
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
class ClusterOptimize : public RequestManagerPlan
{
public:
ClusterOptimize()
: RequestManagerPlan("one.cluster.optimize",
"Create an optimization plan for Cluster",
"A:si")
{}
void request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att) override;
};
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
class ClusterPlanExecute : public RequestManagerPlan
{
public:
ClusterPlanExecute()
: RequestManagerPlan("one.cluster.planexecute",
"Start execution of optimization plan",
"A:si")
{}
void request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att) override;
};
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
class ClusterPlanDelete : public RequestManagerPlan
{
public:
ClusterPlanDelete()
: RequestManagerPlan("one.cluster.plandelete",
"Deletes an optimization plan",
"A:si")
{}
void request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att) override;
};
#endif


@ -69,7 +69,7 @@ protected:
object->to_xml(str);
};
virtual void load_monitoring(PoolObjectSQL *obj) const
virtual void load_extended_data(PoolObjectSQL *obj) const
{
}
};
@ -98,7 +98,7 @@ protected:
static_cast<VirtualMachine *>(object)->to_xml_extended(str);
};
void load_monitoring(PoolObjectSQL *obj) const override
void load_extended_data(PoolObjectSQL *obj) const override
{
static_cast<VirtualMachine*>(obj)->load_monitoring();
}
@ -205,7 +205,7 @@ public:
};
protected:
void load_monitoring(PoolObjectSQL *obj) const override
void load_extended_data(PoolObjectSQL *obj) const override
{
static_cast<Host*>(obj)->load_monitoring();
}
@ -291,6 +291,11 @@ public:
pool = nd.get_clpool();
auth_object = PoolObjectSQL::CLUSTER;
};
void load_extended_data(PoolObjectSQL *obj) const override
{
static_cast<Cluster*>(obj)->load_plan();
}
};
/* ------------------------------------------------------------------------- */


@ -69,7 +69,7 @@ protected:
RequestAttributes& att,
PoolObjectAuth& vm_perms);
int get_host_information(
ErrorCode get_host_information(
int hid,
std::string& name,
std::string& vmm,
@ -78,14 +78,14 @@ protected:
PoolObjectAuth& host_perms,
RequestAttributes& att);
int get_ds_information(
ErrorCode get_ds_information(
int ds_id,
std::set<int>& ds_cluster_ids,
std::string& tm_mad,
RequestAttributes& att,
bool& ds_migr);
int get_default_ds_information(
ErrorCode get_default_ds_information(
int cluster_id,
int& ds_id,
std::string& tm_mad,
@ -148,6 +148,13 @@ public:
~VirtualMachineDeploy() = default;
ErrorCode request_execute(RequestAttributes& att,
int vid,
int hid,
bool enforce,
int ds_id,
const std::string& str_tmpl);
protected:
void request_execute(xmlrpc_c::paramList const& _paramList,
RequestAttributes& att) override;
@ -169,6 +176,14 @@ public:
~VirtualMachineMigrate() = default;
ErrorCode request_execute(RequestAttributes& att,
int vid,
int hid,
bool live,
bool enforce,
int ds_id,
int poweroff);
protected:
void request_execute(xmlrpc_c::paramList const& _paramList,
RequestAttributes& att) override;

include/SchedulerManager.h (new file)

@ -0,0 +1,152 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef SCHEDULER_MANAGER_H_
#define SCHEDULER_MANAGER_H_
#include "ProtocolMessages.h"
#include "DriverManager.h"
#include "Listener.h"
#include "SchedulerManagerDriver.h"
#include <vector>
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
// TODO Make this compatible with HA configuration: do not generate place/optimize
// requests if we are not the leader.
class SchedulerManager :
public DriverManager<SchedulerManagerDriver>,
public Listener
{
public:
SchedulerManager( time_t wtime, unsigned int wlength, time_t retry,
const std::string& mad_location);
virtual ~SchedulerManager() = default;
/**
* This function starts the associated listener thread, and creates a
* new thread for the Scheduler Manager. This thread will wait in
* an action loop till it receives ACTION_FINALIZE.
* @return 0 on success.
*/
int start();
/**
* Loads Scheduler Manager driver defined in configuration file
* @param _mads configuration of drivers
*/
int load_drivers(const std::vector<const VectorAttribute*>& _mads);
/**
* Send a placement request to the scheduler
*/
void trigger_place();
/**
* Send an optimization request for a cluster
* @param cluster_id of the cluster to load balance
*/
void trigger_optimize(int cluster_id);
private:
/**
* The timer action will periodically check placement requests.
* This thread recovers scheduling windows from "missed events" or VMs
* waiting for resources.
*/
constexpr static time_t timer_period = 5;
std::unique_ptr<Timer> timer_thread;
std::mutex wnd_mtx;
time_t wnd_start;
time_t wnd_length;
time_t last_place;
time_t max_wnd_time;
time_t max_wnd_length;
time_t retry_time;
/**
* Generic name for the Scheduler driver
*/
static constexpr const char * driver_name = "sched_exe";
/**
* Returns a pointer to the Scheduler Manager driver. The driver is
* searched by its name and owned by oneadmin with uid=0.
* @return the Scheduler driver owned by uid 0, or nullptr if not found
*/
const SchedulerManagerDriver * get() const
{
return DriverManager::get_driver(driver_name);
}
// -------------------------------------------------------------------------
// Protocol implementation to process scheduler messages
// -------------------------------------------------------------------------
/**
*
*/
static void _undefined(std::unique_ptr<scheduler_msg_t> msg);
/**
*
*/
void _place(std::unique_ptr<scheduler_msg_t> msg);
/**
*
*/
void _optimize(std::unique_ptr<scheduler_msg_t> msg);
/**
*
*/
static void _log(std::unique_ptr<scheduler_msg_t> msg);
// -------------------------------------------------------------------------
// -------------------------------------------------------------------------
/**
*
*/
void timer_action();
/**
*
*/
void finalize_action() override
{
static const int drivers_timeout = 10;
DriverManager::stop(drivers_timeout);
};
};
#endif /*SCHEDULER_MANAGER_H*/
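
A hedged example of how this driver might be declared in oned.conf; the
section follows the usual *_MAD convention, but these attribute names and
values are an assumption, not taken from this diff:

    SCHED_MAD = [
        EXECUTABLE = "one_sched",
        ARGUMENTS  = "-t 15"
    ]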

include/SchedulerManagerDriver.h (new file)

@ -0,0 +1,139 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef SCHEDULER_MANAGER_DRIVER_H_
#define SCHEDULER_MANAGER_DRIVER_H_
#include "ProtocolMessages.h"
#include "Driver.h"
#include "NebulaLog.h"
struct SchedRequest;
class VirtualMachinePool;
class HostPool;
class DatastorePool;
class VirtualNetworkPool;
class UserPool;
class ClusterPool;
class VMGroupPool;
/**
* SchedulerManagerDriver provides a base class to implement Scheduler drivers
* This class implements the protocol and reconnect functions from the Driver
* interface.
*
* This class also includes the logic to generate scheduler inputs (object pools)
* and SCHED_REQUIREMENTS filtering.
*
* This class can be used as a base to implement the interface of specific
* schedulers that need to adapt the default logic.
*/
class SchedulerManagerDriver : public Driver<scheduler_msg_t>
{
public:
SchedulerManagerDriver(const std::string& c, const std::string& a, int ct);
virtual ~SchedulerManagerDriver() = default;
void place() const;
void optimize(int cluster_id) const;
void log_vm(int id, const std::string& msg) const;
void log_cluster(int cluster_id, const std::string& msg) const;
protected:
friend class SchedulerManager;
/**
* Renders a scheduler request in the provided stream to send it to the
* scheduler.
* @return 0 on success
*/
int scheduler_message(SchedRequest& sr, std::ostringstream& oss) const;
/* ---------------------------------------------------------------------- */
/* Match-making functions */
/* ---------------------------------------------------------------------- */
int setup_place_pools(SchedRequest& sr) const;
int setup_optimize_pools(int cluster_id, SchedRequest& sr) const;
/**
* Creates a match-making request.
* @param sr, includes the set of VMs and resources to generate the match-making
*/
void match(SchedRequest& sr, const std::string& ebase) const;
/* ---------------------------------------------------------------------- */
/* Driver Interface */
/* ---------------------------------------------------------------------- */
/**
* Sends a placement request to the MAD: "PLACE ID XML_DRV_MSG"
* @param msg xml data for the mad operation
*/
void place(const std::ostringstream& msg) const
{
write_drv(SchedulerManagerMessages::PLACE, -1, msg);
}
/**
* Sends an optimization request to the MAD: "OPTIMIZE ID XML_DRV_MSG"
* @param cluster_id the cluster id.
* @param msg xml data for the mad operation
*/
void optimize(const int cluster_id, const std::ostringstream& msg) const
{
write_drv(SchedulerManagerMessages::OPTIMIZE, cluster_id, msg);
}
/**
* Writes a message of the given type and object id to the driver
*/
void write_drv(SchedulerManagerMessages type,
const int id,
const std::ostringstream& msg) const
{
scheduler_msg_t drv_msg(type, "", id, msg.str());
write(drv_msg);
}
private:
//Internal reference to OpenNebula pools
VirtualMachinePool * vmpool;
HostPool *hpool;
DatastorePool *dspool;
VirtualNetworkPool *vnpool;
UserPool *upool;
ClusterPool *clpool;
VMGroupPool *vmgpool;
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
#endif /*SCHEDULER_MANAGER_DRIVER_H_*/
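
Given that scheduler_msg_t is Message<SchedulerManagerMessages, false, true,
false, false> (base64-encoded payload, see ProtocolMessages.h), a request
written by write_drv() should look roughly like this on the wire, one
message per line (payload truncated and illustrative):

    OPTIMIZE 100 PD94bWwg...

i.e. "ACTION ID base64(XML_DRV_MSG)".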

include/SchedulerManagerMatch.h (new file)

@ -0,0 +1,434 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef SCHEDULER_MANAGER_MATCH_H_
#define SCHEDULER_MANAGER_MATCH_H_
#include "VirtualMachinePool.h"
#include "DatastorePool.h"
#include "HostPool.h"
#include "VirtualNetworkPool.h"
#include "UserPool.h"
#include "ClusterPool.h"
#include <vector>
#include <map>
#include <memory>
#include <sstream>
/**
* This class represents a generic OpenNebula pool for structuring scheduling
* data
*/
template <typename P, typename O>
class SchedPool
{
public:
SchedPool(P * p, const std::string& n):pool(p), name(n){};
~SchedPool() = default;
//List of relevant ids in the pool
std::vector<int> ids;
//Simple cache to store object references
std::map<int, std::unique_ptr<O>> objects;
//Reference to the OpenNebula pool
P * pool;
//Pool name
std::string name;
/**
* Gets an object from the pool. It loads the object from the DB and stores
* the reference in the cache if not found.
*/
O * get(int id)
{
O * object;
auto it = objects.find(id);
if ( it == objects.end() )
{
if (std::unique_ptr<O> pobj = pool->get_ro(id))
{
object = pobj.get();
objects.insert({id, std::move(pobj)});
}
else
{
return nullptr;
}
}
else
{
object = it->second.get();
}
return object;
}
/**
* Executes the callable on each object ID
*/
void each_id(const std::function<void(int)>& func)
{
for (auto i: ids)
{
func(i);
}
}
/**
* Sets an object in the pool
*/
void set(int id, std::unique_ptr<O> object)
{
objects[id] = std::move(object);
}
/**
* Deletes an object from the Pool if condition is true
*/
void delete_if(const std::function<bool(int)>& test)
{
for(auto it = ids.begin(); it != ids.end(); )
{
if (test(*it))
{
objects.erase(*it);
it = ids.erase(it);
}
else
{
++it;
}
}
}
/**
* Dumps all objects included in the dump_ids set into the given stream.
*/
void to_xml(std::ostringstream& oss)
{
to_xml(oss, ids);
}
template <typename C>
void to_xml(std::ostringstream& oss, const C& dump_ids)
{
oss << "<" << name << ">";
for (int id: dump_ids)
{
O * obj = get(id);
if ( obj != nullptr )
{
oss << *obj;
}
}
oss << "</" << name << ">";
}
};
/**
* This struct represents the set of resource matches for scheduling VMs.
* A match consists of:
* - Hosts
* - Datastores
* - Vnets per NIC using NETWORK_MODE auto
*
* The struct also stores a union of all the sets to build SCHEDULER_DRIVER_MESSAGES
* with the relevant objects.
*/
struct SchedMatch
{
// The struct represents the IDs of the objects that match the requirements
// of a VM
struct match
{
std::set<int> host_ids;
std::set<int> ds_ids;
std::map<int, std::set<int>> vnet_ids;
};
// Match pool, contains matches for all VMs
std::map<int, match> requirements;
// All VMs needed to do the match
std::set<int> vms;
// ID set of matched resources for any VM
std::set<int> match_host;
std::set<int> match_ds;
std::set<int> match_net;
std::set<int> match_vmgroups;
/**
* Add a host ID to the list of matching hosts for this VM
*/
void add_host(int vmid, int hostid)
{
auto it = requirements.find(vmid);
if ( it == requirements.end() )
{
requirements[vmid] = { {hostid}, {} };
}
else
{
it->second.host_ids.insert(hostid);
}
match_host.insert(hostid);
}
/**
* Add a datastore ID to the list of matching system datastore for this VM
*/
void add_ds(int vmid, int dsid)
{
auto it = requirements.find(vmid);
if ( it == requirements.end() )
{
//Discard DS match without a previous Host match
return;
}
it->second.ds_ids.insert(dsid);
match_ds.insert(dsid);
}
/**
* Initialize the VM vnet match map
*/
void init_net(int vm_id, std::set<int>& nic_ids)
{
auto it = requirements.find(vm_id);
if ( it == requirements.end() )
{
return;
}
for (int nic_id : nic_ids)
{
it->second.vnet_ids[nic_id] = {};
}
}
/**
* Add a vnet ID to the list of matching vnets for this VM NIC
*/
void add_net(int vm_id, int nic_id, int net_id)
{
auto it = requirements.find(vm_id);
if ( it == requirements.end() )
{
return;
}
auto jt = it->second.vnet_ids.find(nic_id);
if ( jt == it->second.vnet_ids.end() )
{
it->second.vnet_ids[nic_id] = { net_id };
}
else
{
jt->second.insert(net_id);
}
match_net.insert(net_id);
}
// -------------------------------------------------------------------------
// Functions to check VM matches
// -------------------------------------------------------------------------
bool is_host_matched(int id)
{
auto it = requirements.find(id);
return it != requirements.end() && !it->second.host_ids.empty();
}
bool is_ds_matched(int id)
{
auto it = requirements.find(id);
return it != requirements.end() && !it->second.ds_ids.empty();
}
bool is_net_matched(int id)
{
auto it = requirements.find(id);
if (it == requirements.end())
{
return false;
}
if ( it->second.vnet_ids.empty() )
{
//empty map means the VM has no auto NICs
return true;
}
for (const auto& kv : it->second.vnet_ids )
{
if (kv.second.empty())
{
return false;
}
}
return true;
}
/**
* Render VM requirements as an XML document
*/
void to_xml(std::ostringstream& oss)
{
oss << "<REQUIREMENTS>";
for (const auto& pair : requirements)
{
oss << "<VM>";
oss << "<ID>" << pair.first << "</ID>";
oss << "<HOSTS>";
for (int hid : pair.second.host_ids)
{
oss << "<ID>" << hid << "</ID>";
}
oss << "</HOSTS>";
oss << "<DATASTORES>";
for (int did : pair.second.ds_ids)
{
oss << "<ID>" << did << "</ID>";
}
oss << "</DATASTORES>";
for (const auto& vpair: pair.second.vnet_ids)
{
oss << "<NIC>";
oss << "<ID>" << vpair.first << "</ID>";
oss << "<VNETS>";
for (int vid : vpair.second)
{
oss << "<ID>" << vid << "</ID>";
}
oss << "</VNETS>";
oss << "</NIC>";
}
oss << "</VM>";
}
oss << "</REQUIREMENTS>";
}
};
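// For reference, the REQUIREMENTS document produced by to_xml() above has
// this shape (IDs illustrative):
//
//   <REQUIREMENTS>
//     <VM>
//       <ID>42</ID>
//       <HOSTS><ID>3</ID><ID>7</ID></HOSTS>
//       <DATASTORES><ID>100</ID></DATASTORES>
//       <NIC><ID>0</ID><VNETS><ID>1</ID></VNETS></NIC>
//     </VM>
//   </REQUIREMENTS>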
/**
* This class is used to build a Scheduler driver request message. It includes
* the relevant pool objects and the matchmaking results for each VM.
*/
struct SchedRequest
{
SchedRequest(VirtualMachinePool * vmp,
HostPool * hp,
DatastorePool *dp,
VirtualNetworkPool *vn,
UserPool * up,
ClusterPool *cp):
vmpool(vmp, "VM_POOL"),
hpool(hp, "HOST_POOL"),
dspool(dp, "DATASTORE_POOL"),
vnpool(vn, "VNET_POOL"),
upool(up, "USER_POOL"),
clpool(cp, "CLUSTER_POOL")
{};
SchedPool<VirtualMachinePool, VirtualMachine> vmpool;
SchedPool<HostPool, Host> hpool;
SchedPool<DatastorePool, Datastore> dspool;
SchedPool<VirtualNetworkPool, VirtualNetwork> vnpool;
SchedPool<UserPool, User> upool;
SchedPool<ClusterPool, Cluster> clpool;
SchedMatch match;
void merge_cluster_to_host()
{
for (auto hid : hpool.ids)
{
std::vector<xmlNodePtr> nodes;
auto host = hpool.get(hid);
host->load_monitoring();
ObjectXML monitoring(host->get_monitoring().to_xml());
monitoring.get_nodes("/MONITORING", nodes);
if (!nodes.empty())
{
host->remove_nodes("/HOST/MONITORING");
host->add_node("/HOST", nodes[0], "MONITORING");
monitoring.free_nodes(nodes);
}
int cid = host->get_cluster_id();
if (auto cluster = clpool.get(cid))
{
nodes.clear();
cluster->get_nodes("/CLUSTER/TEMPLATE", nodes);
if (!nodes.empty())
{
host->add_node("/HOST", nodes[0], "CLUSTER_TEMPLATE");
cluster->free_nodes(nodes);
}
}
}
}
};
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
#endif /*SCHEDULER_MANAGER_MATCH_H_*/


@ -65,6 +65,11 @@ public:
return PoolSQL::get<VMGroup>(oid);
}
std::unique_ptr<VMGroup> get_ro(int oid)
{
return PoolSQL::get_ro<VMGroup>(oid);
}
/**
* Gets an object from the pool (if needed the object is loaded from the
* database). The object is locked, other threads can't access the same


@ -33,7 +33,8 @@
class AuthRequest;
class Snapshots;
class HostShareCapacity;
struct HostShareCapacity;
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
@ -249,6 +250,11 @@ public:
resched = do_sched ? 1 : 0;
};
bool is_resched()
{
return resched == 1;
}
// -------------------------------------------------------------------------
// Log & Print
// -------------------------------------------------------------------------
@ -1156,6 +1162,16 @@ public:
return nics.get_nic(nic_id);
}
/**
* Returns a set of the NIC IDs that use NETWORK_MODE auto
* @param ids a set of NIC IDs
* @return the number of ids in the set
*/
int get_auto_nics(std::set<int>& ids)
{
return nics.get_auto_nics(ids);
}
/**
* Returns a set of the security group IDs in use in this VM.
* @param sgs a set of security group IDs
@ -1201,6 +1217,12 @@ public:
*/
void release_vmgroup();
/**
* @return the ID of the VMGroup, -1 if none
*/
int vmgroup_id();
// ------------------------------------------------------------------------
// Virtual Router related functions
// ------------------------------------------------------------------------


@ -143,6 +143,16 @@ public:
return rc == 0 && floating && only;
}
/**
* Check if the NIC uses auto mode
*/
bool is_auto() const
{
std::string net_mode = vector_value("NETWORK_MODE");
return one_util::icasecmp(net_mode, "AUTO");
}
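// Illustrative VM template snippet (attribute values assumed) that puts a
// NIC in automatic network selection mode, so the network is picked by the
// scheduler among the matched VNETs:
//
//   NIC = [ NETWORK_MODE = "auto",
//           SCHED_REQUIREMENTS = "TRAFFIC_TYPE=\"public\"" ]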
/*
* Set nic NAME attribute if not empty, defaults to NAME = NIC${NIC_ID}
*/
@ -360,6 +370,13 @@ public:
*/
void get_security_groups(std::set<int>& sgs);
/**
* Returns a set of the NIC IDs that use NETWORK_MODE auto
* @param ids a set of NIC IDs
* @return the number of ids in the set
*/
int get_auto_nics(std::set<int>& ids);
/* ---------------------------------------------------------------------- */
/* Network Manager Interface */
/* ---------------------------------------------------------------------- */


@ -255,11 +255,7 @@ else
CHOWN_DIRS="$ROOT"
fi
SHARE_DIRS="$SHARE_LOCATION/examples \
$SHARE_LOCATION/examples/external_scheduler \
$SHARE_LOCATION/examples/host_hooks \
$SHARE_LOCATION/examples/network_hooks \
$SHARE_LOCATION/websockify \
SHARE_DIRS="$SHARE_LOCATION/websockify \
$SHARE_LOCATION/websockify/websockify \
$SHARE_LOCATION/oneprovision \
$SHARE_LOCATION/schemas \
@ -294,6 +290,7 @@ ETC_DIRS="$ETC_LOCATION/vmm_exec \
$ETC_LOCATION/fireedge/sunstone/groupadmin \
$ETC_LOCATION/fireedge/sunstone/cloud \
$ETC_LOCATION/alertmanager \
$ETC_LOCATION/schedulers \
$ETC_LOCATION/prometheus"
LIB_DIRS="$LIB_LOCATION/ruby \
@ -306,6 +303,7 @@ LIB_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/onedb/local \
$LIB_LOCATION/ruby/onedb/patches \
$LIB_LOCATION/ruby/vendors \
$LIB_LOCATION/python \
$LIB_LOCATION/mads \
$LIB_LOCATION/sh \
$LIB_LOCATION/sh/override \
@ -517,7 +515,13 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/ipam/equinix \
$VAR_LOCATION/remotes/ipam/scaleway \
$VAR_LOCATION/remotes/ipam/vultr \
$VAR_LOCATION/remotes/ipam/aws"
$VAR_LOCATION/remotes/ipam/aws \
$VAR_LOCATION/remotes/scheduler/dummy \
$VAR_LOCATION/remotes/scheduler/rank \
$VAR_LOCATION/remotes/scheduler/one_drs \
$VAR_LOCATION/remotes/scheduler/one_drs/lib \
$VAR_LOCATION/remotes/scheduler/one_drs/lib/mapper \
$VAR_LOCATION/remotes/scheduler/one_drs/lib/models"
SUNSTONE_DIRS="$SUNSTONE_LOCATION/routes \
$SUNSTONE_LOCATION/models \
@ -714,6 +718,13 @@ INSTALL_FILES=(
IPAM_DRIVER_SCALEWAY_SCRIPTS:$VAR_LOCATION/remotes/ipam/scaleway
IPAM_DRIVER_VULTR_SCRIPTS:$VAR_LOCATION/remotes/ipam/vultr
IPAM_DRIVER_EC2_SCRIPTS:$VAR_LOCATION/remotes/ipam/aws
SCHEDULER_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/scheduler/dummy
SCHEDULER_DRIVER_RANK_SCRIPTS:$VAR_LOCATION/remotes/scheduler/rank
SCHEDULER_DRIVER_ONEDRS_SCRIPTS:$VAR_LOCATION/remotes/scheduler/one_drs
SCHEDULER_DRIVER_ONEDRS_LIB:$VAR_LOCATION/remotes/scheduler/one_drs/lib
SCHEDULER_DRIVER_ONEDRS_MAPPER:$VAR_LOCATION/remotes/scheduler/one_drs/lib/mapper
SCHEDULER_DRIVER_ONEDRS_MODELS:$VAR_LOCATION/remotes/scheduler/one_drs/lib/models
SCHEDULER_DRIVER_ONEDRS_VENDOR:$LIB_LOCATION/python
NETWORK_FILES:$VAR_LOCATION/remotes/vnm
NETWORK_HOOKS_PRE_FILES:$VAR_LOCATION/remotes/vnm/hooks/pre
NETWORK_HOOKS_CLEAN_FILES:$VAR_LOCATION/remotes/vnm/hooks/clean
@ -729,9 +740,6 @@ INSTALL_FILES=(
NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter
NETWORK_ELASTIC_FILES:$VAR_LOCATION/remotes/vnm/elastic
NETWORK_NODEPORT_FILES:$VAR_LOCATION/remotes/vnm/nodeport
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
EXAMPLE_HOST_HOOKS_SHARE_FILES:$SHARE_LOCATION/examples/host_hooks
EXAMPLE_EXTERNAL_SCHED_FILES:$SHARE_LOCATION/examples/external_scheduler
WEBSOCKIFY_SHARE_RUN_FILES:$SHARE_LOCATION/websockify
WEBSOCKIFY_SHARE_MODULE_FILES:$SHARE_LOCATION/websockify/websockify
INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION
@ -947,6 +955,7 @@ INSTALL_ONEHEM_ETC_FILES=(
INSTALL_ETC_FILES=(
ETC_FILES:$ETC_LOCATION
SCHED_RANK_ETC_FILES:$ETC_LOCATION/schedulers
ETC_FILES:$SHARE_LOCATION/conf
EC2_ETC_FILES:$ETC_LOCATION
VCENTER_ETC_FILES:$ETC_LOCATION
@ -962,7 +971,6 @@ INSTALL_ETC_FILES=(
#-------------------------------------------------------------------------------
BIN_FILES="src/nebula/oned \
src/scheduler/src/sched/mm_sched \
src/cli/onevm \
src/cli/oneacct \
src/cli/oneshowback \
@ -1081,7 +1089,9 @@ MADS_LIB_FILES="src/mad/sh/madcommon.sh \
src/market_mad/one_market.rb \
src/market_mad/one_market \
src/ipamm_mad/one_ipam \
src/ipamm_mad/one_ipam.rb"
src/ipamm_mad/one_ipam.rb \
src/schedm_mad/one_sched \
src/schedm_mad/one_sched.rb"
#-------------------------------------------------------------------------------
# Common library files for VMM drivers
@ -2172,6 +2182,56 @@ MARKETPLACE_DRIVER_LXC_SCRIPTS="src/market_mad/remotes/linuxcontainers/import \
src/market_mad/remotes/linuxcontainers/monitor \
src/market_mad/remotes/linuxcontainers/lxd.rb"
#-------------------------------------------------------------------------------
# Scheduler drivers, to be installed under $REMOTES_LOCATION/scheduler
# - Rank scheduler, $REMOTES_LOCATION/scheduler/rank
# - OpenNebula DRS, $REMOTES_LOCATION/scheduler/one_drs
#-------------------------------------------------------------------------------
SCHEDULER_DRIVER_RANK_SCRIPTS="src/schedm_mad/remotes/rank/src/sched/place \
src/schedm_mad/remotes/rank/optimize"
SCHEDULER_DRIVER_ONEDRS_SCRIPTS="src/schedm_mad/remotes/one_drs/place \
src/schedm_mad/remotes/one_drs/optimize"
SCHEDULER_DRIVER_DUMMY_SCRIPTS="src/schedm_mad/remotes/dummy/place \
src/schedm_mad/remotes/dummy/optimize"
SCHEDULER_DRIVER_ONEDRS_VENDOR="src/schedm_mad/remotes/one_drs/vendor/lib/PuLP-2.9.0.dist-info/ \
src/schedm_mad/remotes/one_drs/vendor/lib/bin/ \
src/schedm_mad/remotes/one_drs/vendor/lib/pulp/ \
src/schedm_mad/remotes/one_drs/vendor/lib/typing_extensions-4.12.2.dist-info/ \
src/schedm_mad/remotes/one_drs/vendor/lib/typing_extensions.py \
src/schedm_mad/remotes/one_drs/vendor/lib/xsdata/ \
src/schedm_mad/remotes/one_drs/vendor/lib/xsdata-24.12.dist-info/"
SCHEDULER_DRIVER_ONEDRS_LIB="src/schedm_mad/remotes/one_drs/lib/optimizer_parser.py \
src/schedm_mad/remotes/one_drs/lib/optimizer_serializer.py \
src/schedm_mad/remotes/one_drs/lib/xsd_parser.sh \
src/schedm_mad/remotes/one_drs/lib/__init__.py"
SCHEDULER_DRIVER_ONEDRS_MAPPER="src/schedm_mad/remotes/one_drs/lib/mapper/ilp_optimizer.py \
src/schedm_mad/remotes/one_drs/lib/mapper/model.py \
src/schedm_mad/remotes/one_drs/lib/mapper/mapper.py \
src/schedm_mad/remotes/one_drs/lib/mapper/__init__.py"
SCHEDULER_DRIVER_ONEDRS_MODELS="src/schedm_mad/remotes/one_drs/lib/models/__init__.py \
src/schedm_mad/remotes/one_drs/lib/models/cluster.py \
src/schedm_mad/remotes/one_drs/lib/models/datastore.py \
src/schedm_mad/remotes/one_drs/lib/models/datastore_pool.py \
src/schedm_mad/remotes/one_drs/lib/models/host.py \
src/schedm_mad/remotes/one_drs/lib/models/host_pool.py \
src/schedm_mad/remotes/one_drs/lib/models/plan.py \
src/schedm_mad/remotes/one_drs/lib/models/requirements.py \
src/schedm_mad/remotes/one_drs/lib/models/scheduler_driver_action.py \
src/schedm_mad/remotes/one_drs/lib/models/shared.py \
src/schedm_mad/remotes/one_drs/lib/models/vm_group.py \
src/schedm_mad/remotes/one_drs/lib/models/vm_group_pool.py \
src/schedm_mad/remotes/one_drs/lib/models/vm.py \
src/schedm_mad/remotes/one_drs/lib/models/vm_pool_extended.py \
src/schedm_mad/remotes/one_drs/lib/models/vnet.py \
src/schedm_mad/remotes/one_drs/lib/models/vnet_pool_extended.py"
#-------------------------------------------------------------------------------
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
#-------------------------------------------------------------------------------
@ -2196,9 +2256,11 @@ ETC_FILES="share/etc/oned.conf \
share/etc/defaultrc \
share/etc/guacd \
src/tm_mad/tmrc \
src/scheduler/etc/sched.conf \
src/monitor/etc/monitord.conf "
SCHED_RANK_ETC_FILES="src/schedm_mad/remotes/rank/etc/rank.conf \
src/schedm_mad/remotes/one_drs/etc/one_drs.conf"
EC2_ETC_FILES="src/vmm_mad/remotes/ec2/ec2_driver.conf \
src/vmm_mad/remotes/ec2/ec2_driver.default"
@ -2229,25 +2291,6 @@ AUTH_ETC_FILES="src/authm_mad/remotes/server_x509/server_x509_auth.conf \
src/authm_mad/remotes/ldap/ldap_auth.conf \
src/authm_mad/remotes/x509/x509_auth.conf"
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples
#-------------------------------------------------------------------------------
EXAMPLE_SHARE_FILES="share/examples/vm.template \
share/examples/private.net \
share/examples/public.net"
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples/external_scheduler
#-------------------------------------------------------------------------------
EXAMPLE_EXTERNAL_SCHED_FILES="share/examples/external_scheduler/external_scheduler_server.rb"
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples/host_hooks
#-------------------------------------------------------------------------------
EXAMPLE_HOST_HOOKS_SHARE_FILES="share/examples/host_hooks/error_hook"
#-------------------------------------------------------------------------------
# Files required to interact with the websockify server
#-------------------------------------------------------------------------------
@ -3087,7 +3130,10 @@ XSD_FILES="share/doc/xsd/acct.xsd \
share/doc/xsd/marketplaceapp_pool.xsd
share/doc/xsd/monitoring_data.xsd
share/doc/xsd/opennebula_configuration.xsd
share/doc/xsd/plan.xsd
share/doc/xsd/raftstatus.xsd
share/doc/xsd/requirements.xsd
share/doc/xsd/scheduler_driver_action.xsd
share/doc/xsd/security_group.xsd
share/doc/xsd/security_group_pool.xsd
share/doc/xsd/shared.xsd
@ -3100,10 +3146,12 @@ XSD_FILES="share/doc/xsd/acct.xsd \
share/doc/xsd/vm_group.xsd
share/doc/xsd/vm_group_pool.xsd
share/doc/xsd/vm_pool.xsd
share/doc/xsd/vm_pool_extended.xsd
share/doc/xsd/vmtemplate.xsd
share/doc/xsd/vmtemplate_pool.xsd
share/doc/xsd/vnet.xsd
share/doc/xsd/vnet_pool.xsd
share/doc/xsd/vnet_pool_extended.xsd
share/doc/xsd/vntemplate.xsd
share/doc/xsd/vntemplate_pool.xsd
share/doc/xsd/vrouter.xsd

View File

@ -4,6 +4,8 @@
<xs:complexType>
<xs:sequence>
<xs:element name="ACTION_TIMEOUT" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="API_LIST_ORDER" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="AUTH_MAD" minOccurs="0" maxOccurs="unbounded">
@ -29,6 +31,7 @@
</xs:element>
<xs:element name="CLUSTER_ENCRYPTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="COLD_MIGRATE_MODE" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="CONTEXT_RESTRICTED_DIRS" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="CONTEXT_SAFE_DIRS" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="DATASTORE_CAPACITY_CHECK" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
@ -200,6 +203,7 @@
<xs:element name="KEEPALIVE_MAX_CONN" type="xs:integer" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="KEEPALIVE_TIMEOUT" type="xs:integer" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="LISTEN_ADDRESS" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="LIVE_RESCHEDS" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="LOG" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
@ -243,6 +247,8 @@
</xs:complexType>
</xs:element>
<xs:element name="MAX_ACTIONS_PER_CLUSTER" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="MAX_ACTIONS_PER_HOST" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="MAX_BACKUPS" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="MAX_BACKUPS_HOST" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="MAX_CONN" type="xs:integer" minOccurs="0" maxOccurs="1"/>
@ -272,6 +278,20 @@
</xs:element>
<xs:element name="RPC_LOG" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="SCHED_MAD" minOccurs="0" maxOccurs="1">
<xs:complexType>
<xs:all>
<xs:element name="ARGUMENTS" type="xs:string"/>
<xs:element name="EXECUTABLE" type="xs:string"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="SCHED_MAX_WND_LENGTH" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="SCHED_MAX_WND_TIME" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="SCHED_RETRY_TIME" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="SCRIPTS_REMOTE_DIR" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="SESSION_EXPIRATION_TIME" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="SHOWBACK_ONLY_RUNNING" type="xs:string" minOccurs="0" maxOccurs="1"/>

29
share/doc/xsd/plan.xsd Normal file
View File

@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified"
targetNamespace="http://opennebula.org/XMLSchema" xmlns="http://opennebula.org/XMLSchema">
<xs:element name="PLAN">
<xs:complexType>
<xs:sequence>
<xs:element name="ID" type="xs:integer" minOccurs="1" maxOccurs="1"/>
<xs:element name="ACTION" minOccurs="1" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="VM_ID" type="xs:integer" minOccurs="1" maxOccurs="1"/>
<xs:element name="OPERATION" type="xs:string" minOccurs="1" maxOccurs="1"/>
<xs:element name="HOST_ID" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="DS_ID" type="xs:integer" minOccurs="0" maxOccurs="1"/>
<xs:element name="NIC" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="NIC_ID" type="xs:integer" minOccurs="1" maxOccurs="1"/>
<xs:element name="NETWORK_ID" type="xs:integer" minOccurs="1" maxOccurs="1"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
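
For reference, a minimal instance of the schema above, together with a small Ruby sketch that walks it. The IDs and the operation name are made up for illustration, and namespace declarations are omitted:

    require 'rexml/document'

    # Illustrative PLAN document following plan.xsd
    xml = <<~XML
      <PLAN>
        <ID>100</ID>
        <ACTION>
          <VM_ID>42</VM_ID>
          <OPERATION>migrate</OPERATION>
          <HOST_ID>3</HOST_ID>
          <DS_ID>0</DS_ID>
        </ACTION>
      </PLAN>
    XML

    doc = REXML::Document.new(xml)

    doc.elements.each('PLAN/ACTION') do |action|
      vm   = action.elements['VM_ID'].text
      op   = action.elements['OPERATION'].text
      host = action.elements['HOST_ID']

      puts "VM #{vm}: #{op} -> host #{host ? host.text : '-'}"
    end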

View File

@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified"
targetNamespace="http://opennebula.org/XMLSchema" xmlns="http://opennebula.org/XMLSchema">
<xs:element name="REQUIREMENTS">
<xs:complexType>
<xs:sequence>
<xs:element name="VM" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="ID" type="xs:integer" minOccurs="1" maxOccurs="1"/>
<xs:element name="HOSTS" minOccurs="1" maxOccurs="1">
<xs:complexType>
<xs:sequence>
<xs:element name="ID" type="xs:integer" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="NIC" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:sequence>
<xs:element name="ID" type="xs:integer" maxOccurs="1"/>
<xs:element name="VNETS" minOccurs="1" maxOccurs="1">
<xs:complexType>
<xs:sequence>
<xs:element name="ID" type="xs:integer" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="DATASTORES" minOccurs="1" maxOccurs="1">
<xs:complexType>
<xs:sequence>
<xs:element name="ID" type="xs:integer" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>

View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified"
targetNamespace="http://opennebula.org/XMLSchema" xmlns="http://opennebula.org/XMLSchema">
<xs:include schemaLocation="vm_pool_extended.xsd"/>
<xs:include schemaLocation="host_pool.xsd"/>
<xs:include schemaLocation="datastore_pool.xsd"/>
<xs:include schemaLocation="vnet_pool_extended.xsd"/>
<xs:include schemaLocation="vm_group_pool.xsd"/>
<xs:include schemaLocation="cluster.xsd"/>
<xs:include schemaLocation="requirements.xsd"/>
<xs:element name="SCHEDULER_DRIVER_ACTION">
<xs:complexType>
<xs:sequence>
<xs:element ref="VM_POOL" minOccurs="1" maxOccurs="1"/>
<xs:element ref="HOST_POOL" minOccurs="1" maxOccurs="1"/>
<xs:element ref="DATASTORE_POOL" minOccurs="1" maxOccurs="1"/>
<xs:element ref="VNET_POOL" minOccurs="1" maxOccurs="1"/>
<xs:element ref="VM_GROUP_POOL" minOccurs="1" maxOccurs="1"/>
<xs:element ref="CLUSTER" minOccurs="1" maxOccurs="1"/>
<xs:element ref="REQUIREMENTS" minOccurs="1" maxOccurs="1"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
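
The driver thus receives a single self-contained document with all the pools plus the per-VM placement candidates. Below is a minimal Ruby sketch of how a driver could read the REQUIREMENTS section; reading the document from stdin is an assumption of this sketch, not part of the driver protocol:

    require 'rexml/document'

    doc = REXML::Document.new($stdin.read)

    doc.elements.each('SCHEDULER_DRIVER_ACTION/REQUIREMENTS/VM') do |vm|
      id    = vm.elements['ID'].text
      hosts = vm.get_elements('HOSTS/ID').map(&:text)

      puts "VM #{id} candidate hosts: #{hosts.join(', ')}"
    end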

View File

@ -141,7 +141,7 @@
<xs:element name="NIC_DEFAULT" minOccurs="0" maxOccurs="1"/>
<xs:element name="NUMA_NODE" minOccurs="0" maxOccurs="1"/>
<xs:element name="OS" minOccurs="0" maxOccurs="1"/>
<xs:element name="PCI" minOccurs="0" maxOccurs="1"/>
<xs:element name="PCI" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="RAW" minOccurs="0" maxOccurs="1"/>
<xs:element name="SECURITY_GROUP_RULE" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="SNAPSHOT" minOccurs="0" maxOccurs="unbounded">
@ -164,7 +164,7 @@
<xs:element name="TOPOLOGY" minOccurs="0" maxOccurs="1"/>
<xs:element name="VCPU" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="VCPU_MAX" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="VMGROUP" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="VMGROUP" minOccurs="0" maxOccurs="1"/>
<xs:element name="VMID" type="xs:string"/>
<xs:element name="VROUTER_ID" type="xs:string" minOccurs="0" maxOccurs="1"/>
<xs:element name="VROUTER_KEEPALIVED_ID" type="xs:string" minOccurs="0" maxOccurs="1"/>

View File

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified"
targetNamespace="http://opennebula.org/XMLSchema" xmlns="http://opennebula.org/XMLSchema">
<xs:include schemaLocation="vm.xsd"/>
<xs:element name="VM_POOL">
<xs:complexType>
<xs:sequence maxOccurs="1" minOccurs="1">
<xs:element ref="VM" maxOccurs="unbounded" minOccurs="0"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>

View File

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified"
targetNamespace="http://opennebula.org/XMLSchema" xmlns="http://opennebula.org/XMLSchema">
<xs:include schemaLocation="vnet.xsd"/>
<xs:element name="VNET_POOL">
<xs:complexType>
<xs:sequence maxOccurs="1" minOccurs="1">
<xs:element ref="VNET" maxOccurs="unbounded" minOccurs="0"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>

View File

@ -597,6 +597,62 @@ IPAM_MAD = [
ARGUMENTS = "-t 1 -i dummy,aws,equinix,vultr,scaleway"
]
#*******************************************************************************
# Scheduler
#*******************************************************************************
# SCHED_MAD: Scheduler driver to manage different schedulers
# * executable: path of the scheduler driver executable, can be an
# absolute path or relative to /usr/lib/one/mads/
#
# * arguments: options passed to the scheduler driver
# -t number of threads, i.e. the number of operations executed concurrently
# -s scheduler to use:
# * rank, OpenNebula default match-making algorithm
# * one_drs, optimize VM placement and load-balance clusters
#
# Scheduler Window
# OpenNebula opens a scheduling window when a request for VM placement
# is received (i.e. a new pending or resched VM). A placement request is sent
# to the scheduler when:
# * The window has been open for more than SCHED_MAX_WND_TIME
# * The number of VMs waiting for placement is greater than SCHED_MAX_WND_LENGTH
#
# The window is closed when the placement request is made.
#
# SCHED_RETRY_TIME: For VMs that cannot be placed (i.e. no resources available)
# a new placement request will be made every SCHED_RETRY_TIME seconds.
#
# MAX_ACTIONS_PER_HOST: Max number of scheduled deploy/migrate actions per host
#
# MAX_ACTIONS_PER_CLUSTER: Max number of scheduled deploy/migrate actions per cluster
#
# ACTION_TIMEOUT: Time in seconds after which pending actions are marked as TIMEOUT failures
#
# LIVE_RESCHEDS: Perform live (1) or cold (0) migrations when rescheduling a VM
#
# COLD_MIGRATE_MODE: Type of cold migration, see documentation for one.vm.migrate
# 0 = save - default
# 1 = poweroff
# 2 = poweroff-hard
#*******************************************************************************
SCHED_MAD = [
EXECUTABLE = "one_sched",
ARGUMENTS = "-t 15 -s rank"
]
SCHED_MAX_WND_TIME = 10
SCHED_MAX_WND_LENGTH = 7
SCHED_RETRY_TIME = 60
MAX_ACTIONS_PER_HOST = 2
MAX_ACTIONS_PER_CLUSTER = 10
ACTION_TIMEOUT = 300
LIVE_RESCHEDS = 0
COLD_MIGRATE_MODE = 0
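
The window logic described above reduces to two thresholds. The following is a minimal Ruby sketch, for illustration only; the actual implementation is the C++ SchedulerManager, which also re-evaluates the time threshold on a timer:

    class SchedWindow
      def initialize(max_time, max_length)
        @max_time   = max_time    # SCHED_MAX_WND_TIME
        @max_length = max_length  # SCHED_MAX_WND_LENGTH
        @opened_at  = nil
        @pending    = 0
      end

      # Called for each new pending/resched VM; true means "send PLACE now"
      def on_pending_vm(now = Time.now.to_i)
        @opened_at ||= now        # open the window on the first request
        @pending   += 1

        return false if now - @opened_at < @max_time && @pending <= @max_length

        @opened_at = nil          # close the window
        @pending   = 0
        true
      end
    end

    window = SchedWindow.new(10, 7) # defaults from above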
#*******************************************************************************
# Hook Manager Configuration
#*******************************************************************************

View File

@ -1,38 +0,0 @@
#!/usr/bin/ruby
require 'sinatra'
# Testing application for External Scheduler
# Returns valid response for the POST request
before do
content_type 'application/json'
end
# get '/' do
# 'Hello get!'
# end
post '/' do
body = request.body.read
data = JSON.parse body
# puts JSON.pretty_generate(data)
vms = []
response = { :VMS => vms }
# Go through all Virtual Machines
data['VMS'].each do |vm|
hosts = vm['HOST_IDS']
next if hosts.nil? || hosts.empty?
# Randomize the host based on the VM ID
host_id = hosts[vm['ID'].to_i % hosts.size]
vms << { :ID => vm['ID'], :HOST_ID => host_id }
end
# puts JSON.pretty_generate(response)
response.to_json
end

View File

@ -1,8 +0,0 @@
ARGUMENTS = "$TEMPLATE -m -p 5"
ARGUMENTS_STDIN = "yes"
COMMAND = "ft/host_error.rb"
NAME = "host_error"
STATE = "ERROR"
REMOTE = "no"
RESOURCE = HOST
TYPE = state

View File

@ -1,34 +0,0 @@
{
"name": "my_first_service",
"roles": [
{
"name": "frontend",
"cardinality": 1,
"parents": [
"mysql_server",
"kvm_host",
"nfs_server"
],
"vm_template": 3
},
{
"name": "nfs_server",
"cardinality": 1,
"vm_template": 2
},
{
"name": "mysql_server",
"cardinality": 1,
"vm_template": 5
},
{
"name": "kvm_host",
"parents": [
"nfs_server"
],
"cardinality": 3,
"vm_template": 7
}
],
"deployment": "none"
}

View File

@ -1,57 +0,0 @@
{
"name": "scalability",
"deployment": "straight",
"roles": [
{
"name": "frontend",
"cardinality": 1,
"vm_template": 0,
"min_vms" : 1,
"max_vms" : 5,
"elasticity_policies" : [
{
// +2 VMs when the exp. is true for 3 times in a row,
// separated by 10 seconds
"type" : "CHANGE",
"expression" : "ATT > 50",
"adjust" : 2,
"period_number" : 3,
"period" : 10
},
{
// -10 percent VMs when the exp. is true.
// If 10 percent is less than 2, -2 VMs.
"type" : "PERCENTAGE_CHANGE",
"expression" : "ATT < 10",
"adjust" : -10,
"min_adjust_step" : 2
},
{
// Set cardinality to 4 when the exp. is true
"type" : "CARDINALITY",
"expression" : "SOME_ATT <= 50 && OTHER_ATT > 90",
"adjust" : 4
}
],
"scheduled_policies" : [
{
// Set cardinality to 2 each 10 minutes
"type" : "CARDINALITY",
"recurrence" : "*/10 * * * *",
"adjust" : 2
},
{
// +10 percent at the given date and time
"type" : "PERCENTAGE_CHANGE",
"start_time" : "2nd oct 2013 15:45",
"adjust" : 10
}
]
}
]
}

View File

@ -1,35 +0,0 @@
{
"name": "my_first_service",
"deployment": "straight",
"roles": [
{
"name": "frontend",
"cardinality": 1,
"parents": [
"mysql_server",
"kvm_host",
"nfs_server"
],
"vm_template": 3
},
{
"name": "nfs_server",
"cardinality": 1,
"vm_template": 2
},
{
"name": "mysql_server",
"cardinality": 1,
"vm_template": 5
},
{
"name": "kvm_host",
"parents": [
"nfs_server"
],
"cardinality": 3,
"vm_template": 7
}
]
}

View File

@ -1,5 +0,0 @@
NAME = "Private LAN"
TYPE = "RANGED"
BRIDGE = "eth0"
NETWORK_SIZE = 250
NETWORK_ADDRESS= "10.0.0.0"

View File

@ -1,9 +0,0 @@
NAME = "Public LAN"
TYPE = "FIXED"
BRIDGE= "eth1"
LEASES= [IP="130.10.0.1",MAC="50:20:20:20:20:20"]
LEASES= [IP="130.10.0.2",MAC="50:20:20:20:20:21"]
LEASES= [IP="130.10.0.3",MAC="50:20:20:20:20:22"]
LEASES= [IP="130.10.0.4"]
LEASES= [IP="130.10.0.5"]
LEASES= [IP="130.10.0.6"]

View File

@ -1,50 +0,0 @@
#---------------------------------------
# VM definition example
#---------------------------------------
NAME = "vm-example"
CPU = 0.5
MEMORY = 128
# --- kernel & boot device ---
OS = [
kernel = "/vmlinuz",
initrd = "/initrd.img",
root = "sda1" ]
# --- 2 disks ---
DISK = [
source = "/local/xen/domains/etch/disk.img",
target = "sda1",
readonly = "no" ]
DISK = [
source = "/local/xen/domains/etch/swap.img",
target = "sda2",
readonly = "no" ]
DISK = [ IMAGE ="Ubuntu Server" ]
# --- 1 NIC ---
NIC = [ mac="00:ff:72:17:20:27"]
NIC = [ NETWORK="Private LAN"]
# --- VNC server ---
GRAPHICS = [
type = "vnc",
listen = "127.0.0.1",
port = "5"]
# --- Context ---
CONTEXT = [
hostname = "$NAME",
ip_private = "$NIC[IP, NETWORK=\"Private LAN\"]",
ip_gen = "10.0.0.$VM_ID"]

View File

@ -1,9 +0,0 @@
/var/log/one/sched.log {
dateext
dateformat -%Y%m%d-%s
weekly
rotate 52
missingok
notifempty
copytruncate
}

View File

@ -1,21 +0,0 @@
[Unit]
Description=OpenNebula Cloud Scheduler Daemon
After=syslog.target network.target remote-fs.target
After=opennebula.service
AssertFileNotEmpty=/var/lib/one/.one/one_auth
[Service]
Type=simple
Group=oneadmin
User=oneadmin
ExecStartPre=-/usr/sbin/logrotate -f /etc/logrotate.d/opennebula-scheduler -s /var/lib/one/.logrotate.status
ExecStartPre=-/bin/sh -c 'for file in /var/log/one/sched*.log; do if [ ! -f "$file.gz" ]; then gzip -9 "$file"; fi; done'
ExecStart=/usr/bin/mm_sched
StartLimitInterval=60
StartLimitBurst=3
Restart=on-failure
RestartSec=5
SyslogIdentifier=opennebula-scheduler
[Install]
WantedBy=multi-user.target

View File

@ -3,7 +3,7 @@ Description=OpenNebula Cloud Controller Daemon
After=syslog.target network.target remote-fs.target
After=mariadb.service mysql.service
After=opennebula-ssh-agent.service
Wants=opennebula-scheduler.service opennebula-hem.service
Wants=opennebula-hem.service
Wants=opennebula-ssh-agent.service
Wants=opennebula-ssh-socks-cleaner.timer
Wants=opennebula-showback.timer

View File

@ -18,7 +18,6 @@
if [ -z "$ONE_LOCATION" ]; then
ONE_PID=/var/run/one/oned.pid
ONE_SCHEDPID=/var/run/one/sched.pid
ONE_CONF=/etc/one/oned.conf
ONE_DB=/var/lib/one/one.db
ONE_LOG=/var/log/one/oned.log
@ -28,13 +27,11 @@ if [ -z "$ONE_LOCATION" ]; then
ONE_MONITOR_LOG=/var/log/one/monitor.log
ONED=/usr/bin/oned
ONE_SCHEDULER=/usr/bin/mm_sched
ONE_HEM=/usr/bin/onehem-server
LOCK_FILE=/var/lock/one/one
else
ONE_PID=$ONE_LOCATION/var/oned.pid
ONE_SCHEDPID=$ONE_LOCATION/var/sched.pid
ONE_CONF=$ONE_LOCATION/etc/oned.conf
ONE_DB=$ONE_LOCATION/var/one.db
ONE_LOG=$ONE_LOCATION/var/oned.log
@ -44,7 +41,6 @@ else
ONE_MONITOR_LOG=$ONE_LOCATION/var/monitor.log
ONED=$ONE_LOCATION/bin/oned
ONE_SCHEDULER=$ONE_LOCATION/bin/mm_sched
ONE_HEM=$ONE_LOCATION/bin/onehem-server
LOCK_FILE=$ONE_LOCATION/var/lock/.lock
@ -63,7 +59,7 @@ setup()
mkdir -p $ONE_PID_DIR
if [ ! -w $ONE_PID_DIR ]; then
echo "$ONE_PID_DIR is not writable, cannot start oned or scheduler."
echo "$ONE_PID_DIR is not writable, cannot start oned."
exit 1
fi
@ -76,14 +72,6 @@ setup()
exit 1
fi
fi
if [ -f $ONE_SCHEDPID ]; then
ONESCHEDPID=`cat $ONE_SCHEDPID`
ps $ONESCHEDPID > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "The scheduler is still running (PID:$ONESCHEDPID). Please try 'one stop' first."
exit 1
fi
fi
echo "Stale .lock detected. Erasing it."
rm $LOCK_FILE
@ -97,8 +85,6 @@ stop()
{
stop_oned
stop_sched
stop_hem
}
@ -122,14 +108,6 @@ stop_oned()
fi
}
stop_sched()
{
if [ -f $ONE_SCHEDPID ]; then
kill `cat $ONE_SCHEDPID` > /dev/null 2>&1
rm -f $ONE_SCHEDPID > /dev/null 2>&1
fi
}
stop_hem()
{
onehem-server stop > /dev/null 2>&1
@ -145,11 +123,6 @@ start()
exit 1
fi
if [ ! -x "$ONE_SCHEDULER" ]; then
echo "Can not find $ONE_SCHEDULER."
exit 1
fi
if [ ! -x "$ONE_HEM" ]; then
echo "Can not find $ONE_HEM."
exit 1
@ -169,9 +142,6 @@ start()
# Start the one daemon
start_oned
# Start the scheduler
start_sched
# Start hook execution manager server
start_hem
@ -187,13 +157,6 @@ start()
STARTED="false"
fi
ps `cat $ONE_SCHEDPID` > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "scheduler failed to start"
STARTED="false"
fi
if [ "$STARTED" == "false" ]; then
stop
exit -1
@ -220,24 +183,6 @@ start_oned()
fi
}
start_sched()
{
if [ "$BACKUP" = "true" ];then
[ -f "$ONE_SCHED_LOG" ] && mv $ONE_SCHED_LOG{,.$(date '+%Y%m%d%H%M%S')}
fi
$ONE_SCHEDULER&
LASTRC=$?
LASTPID=$!
if [ $LASTRC -ne 0 ]; then
echo "Error executing $ONE_SCHEDULER"
exit 1
else
echo $LASTPID > $ONE_SCHEDPID
fi
}
start_hem()
{
if [ "$BACKUP" = "true" ];then
@ -280,19 +225,8 @@ case "$1" in
setup
start
;;
start-sched)
start_sched
;;
stop-sched)
stop_sched
;;
restart-sched)
stop_sched
sleep 1
start_sched
;;
*)
echo "Usage: one [-f] {start|stop|restart|start-sched|stop-sched|restart-sched}" >&2
echo "Usage: one [-f] {start|stop|restart}" >&2
echo "Options:" >&2
echo " -f Do not backup log files." >&2
exit 3

View File

@ -104,8 +104,8 @@
<fileName>src/rm/RequestManagerUser.cc</fileName>
</suppress>
<suppress>
<id>nullPointer</id>
<fileName>src/scheduler/include/Scheduler.h</fileName>
<id>knownConditionTrueFalse</id>
<fileName>src/cluster/ClusterPool.cc</fileName>
</suppress>
<suppress>
<id>uninitMemberVar</id>
@ -120,8 +120,8 @@
<fileName>src/schedule/ScheduledActionManager.cc</fileName>
</suppress>
<suppress>
<id>missingOverride</id>
<fileName>src/scheduler/include/*XML.h</fileName>
<id>returnDanglingLifetime</id>
<fileName>include/SchedulerManagerMatch.h</fileName>
</suppress>
</suppressions>

View File

@ -14,9 +14,9 @@
# To disable specific error messages use cppcheck-suppressions.xml file
SOURCES="src"
INCLUDES="-I include -I src/monitor/include -I src/scheduler/include"
INCLUDES="-I include -I src/monitor/include"
DEFINES="-DSQLITE_DB -DMYSQL_DB -DSYSTEMD"
ENABLE="--enable=performance,information,warning,portability,style"
ENABLE="--enable=performance,information,warning,portability,style,missingInclude"
IGNORE="-i .xmlrpc_test/ -i src/sunstone/ -i src/svncterm_server/ -i src/fireedge -i src/parsers -i src/vmm/LibVirtDriverKVM.cc"
SUPRESS="--suppress-xml=share/smoke_tests/config/cppcheck-suppressions.xml"
OTHERS="--std=c++17 --error-exitcode=2 -q -j 4"

View File

@ -71,6 +71,6 @@ end
if pass == secret
exit 0
else
OpenNebula.error_message "Invalid credentials"
OpenNebula::DriverLogger.report "Invalid credentials"
exit -1
end

View File

@ -73,13 +73,13 @@ begin
server_auth = OpenNebula::ServerCipherAuth.new_driver
rc = server_auth.authenticate(user, pass, secret)
rescue => e
OpenNebula.error_message e.message
OpenNebula::DriverLogger.report e.message
exit -1
end
if rc == true
exit 0
else
OpenNebula.error_message rc
OpenNebula::DriverLogger.report rc
exit -1
end

View File

@ -75,13 +75,13 @@ begin
rc = server_auth.authenticate(user, pass, dsecret)
rescue => e
OpenNebula.error_message e.message
OpenNebula::DriverLogger.report e.message
exit -1
end
if rc == true
exit 0
else
OpenNebula.error_message rc
OpenNebula::DriverLogger.report rc
exit -1
end

View File

@ -71,7 +71,7 @@ end
begin
ssh_auth = OpenNebula::SshAuth.new(:public_key=>pass)
rescue Exception => e
OpenNebula.error_message e.message
OpenNebula::DriverLogger.report e.message
exit -1
end
@ -80,6 +80,6 @@ rc = ssh_auth.authenticate(user,secret)
if rc == true
exit 0
else
OpenNebula.error_message rc
OpenNebula::DriverLogger.report rc
exit -1
end

View File

@ -80,13 +80,13 @@ begin
rc = x509_auth.authenticate(user, pass, token)
rescue => e
OpenNebula.error_message e.message
OpenNebula::DriverLogger.report e.message
exit(-1)
end
if rc == true
exit 0
else
OpenNebula.error_message rc
OpenNebula::DriverLogger.report rc
exit(-1)
end

View File

@ -157,6 +157,54 @@ class OneClusterHelper < OpenNebulaHelper::OneHelper
cluster.datastore_ids.each do |id|
puts format('%-15s', id)
end
plan_state = cluster.plan_state
return if plan_state == -1
puts
CLIHelper.print_header(format('PLAN: %s', Cluster::PLAN_STATE[plan_state]), false)
table = CLIHelper::ShowTable.new(nil, self) do
column :VM, 'VM ID', :size => 6 do |d|
d['VM_ID']
end
column :ACTION, 'Action', :left, :size => 10 do |d|
d['OPERATION']
end
column :HOST, 'Host ID', :right, :size => 6 do |d|
if d['HOST_ID'] != '-1'
d['HOST_ID']
else
'-'
end
end
column :DS, 'Datastore ID', :right, :size => 6 do |d|
if d['DS_ID'] != '-1'
d['DS_ID']
else
'-'
end
end
column :STATE, 'Action state', :size => 8 do |d|
Cluster::PLAN_STATE[d['STATE'].to_i]
end
column :START_TIME, 'Action start time', :right, :size => 16 do |d|
if d['TIMESTAMP'] != '0'
OpenNebulaHelper.time_to_str(d['TIMESTAMP'])
else
'-'
end
end
default :VM, :ACTION, :HOST, :DS, :STATE, :START_TIME
end
table.show(cluster.plan_actions)
end
end

View File

@ -233,4 +233,38 @@ CommandParser::CmdParser.new(ARGV) do
o.rename(args[1])
end
end
optimize_desc = <<-EOT.unindent
Create an optimization plan for the Cluster
EOT
# TODO: Consider optional parameters
# - autostart: Automatically start applying the new optimization plan
# - force: Delete the old plan if one exists
command :optimize, optimize_desc, :clusterid do
helper.perform_action(args[0], options, 'creating optimization plan') do |o|
o.optimize
end
end
planexecute_desc = <<-EOT.unindent
Start applying the optimization plan
EOT
command :planexecute, planexecute_desc, :clusterid do
helper.perform_action(args[0], options, 'cluster optimization started') do |o|
o.plan_execute
end
end
plandelete_desc = <<-EOT.unindent
Delete the optimization plan
EOT
# TODO: We should allow deleting a placement plan with id -1
command :plandelete, plandelete_desc, :clusterid do
helper.perform_action(args[0], options, 'plan deleted') do |o|
o.plan_delete
end
end
end
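
With these commands, a typical optimization cycle from the shell could look as follows (cluster ID 100 is illustrative):

    onecluster optimize 100      # ask the scheduler for an optimization plan
    onecluster show 100          # the PLAN section lists the proposed actions
    onecluster planexecute 100   # start applying the plan
    onecluster plandelete 100    # or discard it instead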

View File

@ -19,6 +19,7 @@
#include "Nebula.h"
#include "OneDB.h"
#include "DatastorePool.h"
#include "PlanPool.h"
#include <sstream>
@ -41,7 +42,7 @@ Cluster::Cluster(
{
if (cl_template)
{
obj_template = move(cl_template);
obj_template = std::move(cl_template);
}
else
{
@ -241,12 +242,13 @@ string& Cluster::to_xml(string& xml) const
oss <<
"<CLUSTER>" <<
"<ID>" << oid << "</ID>" <<
"<NAME>" << name << "</NAME>" <<
"<ID>" << oid << "</ID>" <<
"<NAME>" << name << "</NAME>" <<
hosts.to_xml(host_collection_xml) <<
datastores.to_xml(ds_collection_xml) <<
vnets.to_xml(vnet_collection_xml) <<
obj_template->to_xml(template_xml) <<
plan_xml <<
"</CLUSTER>";
xml = oss.str();
@ -298,3 +300,43 @@ int Cluster::from_xml(const string& xml)
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Cluster::set_error_message(const char *mod, const string& name, const string& message)
{
static const int MAX_ERROR_MSG_LENGTH = 100;
SingleAttribute * attr;
ostringstream error_value;
error_value << one_util::log_time() << ": " << message.substr(0, MAX_ERROR_MSG_LENGTH);
if (message.length() >= MAX_ERROR_MSG_LENGTH)
{
error_value << "... see more details in oned.log";
NebulaLog::error(mod, message);
}
attr = new SingleAttribute(name, error_value.str());
obj_template->erase(name);
obj_template->set(attr);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Cluster::load_plan()
{
auto plpool = Nebula::instance().get_planpool();
auto plan = plpool->get_ro(oid);
if (plan->state() != PlanState::NONE)
{
plan_xml = plan->to_xml();
}
}

View File

@ -19,6 +19,7 @@
#include "NebulaLog.h"
#include "ClusterTemplate.h"
#include "DatastorePool.h"
#include "Plan.h"
#include <stdexcept>
@ -370,7 +371,3 @@ int ClusterPool::del_from_cluster(PoolObjectSQL::ObjectType type, Cluster* clust
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -1,156 +0,0 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "HttpRequest.h"
#include <iostream>
using namespace std;
HttpRequest::HttpRequest()
{
curl_global_init(CURL_GLOBAL_ALL);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
HttpRequest::~HttpRequest()
{
curl_global_cleanup();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HttpRequest::post_json(const std::string& url, const std::string& data, std::string& response)
{
auto curl = curl_easy_init();
if (!curl) return -1;
auto headers = curl_slist_append(nullptr, "Accept: application/json");
headers = curl_slist_append(headers, "Content-Type: application/json");
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data.c_str());
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_to_string);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);
if (_timeout != 0)
{
curl_easy_setopt(curl, CURLOPT_TIMEOUT, _timeout);
}
if (!_proxy.empty())
{
curl_easy_setopt(curl, CURLOPT_PROXY, _proxy);
}
auto ec = curl_easy_perform(curl);
if (ec != CURLE_OK)
{
response = curl_easy_strerror(ec);
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
return -1;
}
auto rc = check_http_code(curl, response);
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
return rc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HttpRequest::get_json(const std::string& url, std::string& response)
{
auto curl = curl_easy_init(); // todo use RAII to automatically clean up
if (!curl) return -1;
auto headers = curl_slist_append(nullptr, "Accept: application/json");
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1L);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_to_string);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);
if (_timeout != 0)
{
curl_easy_setopt(curl, CURLOPT_TIMEOUT, _timeout);
}
if (!_proxy.empty())
{
curl_easy_setopt(curl, CURLOPT_PROXY, _proxy);
}
auto ec = curl_easy_perform(curl);
if (ec != CURLE_OK)
{
response = curl_easy_strerror(ec);
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
return -1;
}
auto rc = check_http_code(curl, response);
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
return rc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
size_t HttpRequest::write_to_string(void *ptr, size_t size, size_t count, void *str)
{
((string*)str)->assign((char*)ptr, 0, size*count);
return size*count;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HttpRequest::check_http_code(CURL* curl, std::string& msg)
{
long http_code = 0;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
if (http_code != 200)
{
msg = "Http code " + to_string(http_code) + ": " + msg;
return -1;
}
return 0;
}

View File

@ -24,7 +24,6 @@ lib_name='nebula_common'
source_files=[
'Attribute.cc',
'ExtendedAttribute.cc',
'HttpRequest.cc',
'NebulaService.cc',
'NebulaUtil.cc',
'SSLUtil.cc'

View File

@ -50,7 +50,7 @@ Datastore::Datastore(
{
if (ds_template)
{
obj_template = move(ds_template);
obj_template = std::move(ds_template);
}
else
{
@ -60,6 +60,12 @@ Datastore::Datastore(
set_umask(umask);
group_u = 1;
//-------------------- Init search xpath routes ---------------------------
ObjectXML::paths = {
"/DATASTORE/TEMPLATE/",
"/DATASTORE/"
};
}
/* ------------------------------------------------------------------------ */

View File

@ -31,6 +31,7 @@
#include "VirtualRouterPool.h"
#include "SecurityGroupPool.h"
#include "ScheduledActionPool.h"
#include "SchedulerManager.h"
using namespace std;
@ -575,6 +576,8 @@ int DispatchManager::release(int vid, const RequestAttributes& ra,
vm->set_state(VirtualMachine::PENDING);
vmpool->update(vm.get());
Nebula::instance().get_sm()->trigger_place();
}
else
{
@ -727,6 +730,8 @@ int DispatchManager::resume(int vid, const RequestAttributes& ra,
vm->set_state(VirtualMachine::PENDING);
vmpool->update(vm.get());
Nebula::instance().get_sm()->trigger_place();
}
else if (vm->get_state() == VirtualMachine::SUSPENDED)
{
@ -859,7 +864,13 @@ int DispatchManager::resched(int vid, bool do_resched,
}
vm->set_resched(do_resched);
vmpool->update(vm.get());
if (do_resched)
{
Nebula::instance().get_sm()->trigger_place();
}
}
else
{
@ -1129,6 +1140,7 @@ int DispatchManager::delete_recreate(unique_ptr<VirtualMachine> vm,
vmpool->update(vm.get());
Nebula::instance().get_sm()->trigger_place();
break;
case VirtualMachine::POWEROFF:

View File

@ -19,6 +19,7 @@
#include "Quotas.h"
#include "Nebula.h"
#include "VirtualMachinePool.h"
#include "SchedulerManager.h"
using namespace std;
@ -314,6 +315,8 @@ void DispatchManager::trigger_resubmit(int vid)
vm->set_deploy_id(""); //reset the deploy-id
vmpool->update(vm.get());
Nebula::instance().get_sm()->trigger_place();
}
});
}

View File

@ -51,6 +51,17 @@ Host::Host(
replace_template_attribute("IM_MAD", im_mad_name);
replace_template_attribute("VM_MAD", vmm_mad_name);
//-------------------- Init search xpath routes ---------------------------
ObjectXML::paths = {
"/HOST/TEMPLATE/",
"/HOST/HOST_SHARE/",
"/HOST/HOST_SHARE/DATASTORES/",
"/HOST/MONITORING/CAPACITY/",
"/HOST/MONITORING/SYSTEM/",
"/HOST/",
"/HOST/CLUSTER_TEMPLATE/"
};
}
/* ------------------------------------------------------------------------ */

View File

@ -99,15 +99,15 @@ class InformationManagerDriver < OpenNebulaDriver
end
end
result, info = do_action(input[:im_mad],
do_action(input[:im_mad],
input[:host_id],
input[:hostname],
:START_MONITOR,
:stdin => input[:stdin],
:script_name => 'run_monitord_client',
:respond => false)
write_respond(:START_MONITOR, result, input[:host_id], info)
:respond => false,
:zip => true,
:base64 => true)
end
def stop_monitor(_not_used, _hostid, _timestamp, zaction64)
@ -115,15 +115,15 @@ class InformationManagerDriver < OpenNebulaDriver
return if rc == -1
result, info = do_action(input[:im_mad],
do_action(input[:im_mad],
input[:host_id],
input[:hostname],
:STOP_MONITOR,
:script_name => 'stop_monitord_client',
:stdin => input[:stdin],
:respond => false)
write_respond(:STOP_MONITOR, result, input[:host_id], info)
:respond => false,
:zip => true,
:base64 => true)
end
private
@ -145,11 +145,10 @@ class InformationManagerDriver < OpenNebulaDriver
config_xml.add_element(hid_elem)
[0, {:im_mad => im_mad,
:host_id => host_id,
:hostname => hostname,
:stdin => config_xml.to_s}]
[0, { :im_mad => im_mad,
:host_id => host_id,
:hostname => hostname,
:stdin => config_xml.to_s }]
rescue StandardError => e
write_respond(msg_type,
RESULT[:failure],
@ -161,42 +160,21 @@ class InformationManagerDriver < OpenNebulaDriver
# Sends a log message to ONE. The +message+ can be multiline; it will
# be automatically split into lines.
def log(id, message, not_used=true)
msg = message.strip
msg.each_line do |line|
severity = 'I'
m = line.match(/^(ERROR|DEBUG|INFO):(.*)$/)
if m
line = m[2]
case m[1]
when 'ERROR'
severity = 'E'
when 'DEBUG'
severity = 'D'
when 'INFO'
severity = 'I'
else
severity = 'I'
end
end
write_respond('LOG', severity, id, line.strip)
end
end
def write_respond(action="-", result=RESULT[:failure], id="-", info="-")
info = Zlib::Deflate.deflate(info, Zlib::BEST_COMPRESSION)
info = Base64.strict_encode64(info)
@stdout_mutex.synchronize {
STDOUT.puts "#{action} #{result} #{id} #{Time.now.to_i} #{info}"
STDOUT.flush
def log_method(num, _options = {})
lambda {|message, all = true|
log(num, message, all, :compress => true, :encode => true)
}
end
# rubocop:disable Metrics/ParameterLists
def send_message(action = '-', result = RESULT[:failure], id = '-', info = '-')
@send_mutex.synchronize do
STDOUT.puts "#{action} #{result} #{id} #{Time.now.to_i} #{info}"
STDOUT.flush
end
end
# rubocop:enable Metrics/ParameterLists
end
# Information Manager main program

View File

@ -29,6 +29,8 @@
#include "ImagePool.h"
#include "DatastorePool.h"
#include "VirtualMachinePool.h"
#include "SchedulerManager.h"
#include "PlanManager.h"
using namespace std;
@ -304,6 +306,7 @@ void LifeCycleManager::trigger_deploy_success(int vid)
Template quota_tmpl;
int uid = vm->get_uid();
int gid = vm->get_gid();
int cid = vm->get_cid();
//----------------------------------------------------
// RUNNING STATE
@ -375,6 +378,9 @@ void LifeCycleManager::trigger_deploy_success(int vid)
{
Quotas::quota_del(Quotas::VM, uid, gid, &quota_tmpl);
}
auto planm = Nebula::instance().get_planm();
planm->action_success(cid, vid);
});
}
@ -392,6 +398,8 @@ void LifeCycleManager::trigger_deploy_failure(int vid)
return;
}
int cid = vm->get_cid();
time_t the_time = time(0);
if ( vm->get_lcm_state() == VirtualMachine::MIGRATE )
@ -448,6 +456,9 @@ void LifeCycleManager::trigger_deploy_failure(int vid)
vmpool->update_history(vm.get());
vmpool->update(vm.get());
auto planm = Nebula::instance().get_planm();
planm->action_failure(cid, vid);
});
}
@ -470,6 +481,8 @@ void LifeCycleManager::trigger_shutdown_success(int vid)
Template quota_tmpl;
int uid = vm->get_uid();
int gid = vm->get_gid();
int cid = vm->get_cid();
auto state = vm->get_lcm_state();
if ( vm->get_lcm_state() == VirtualMachine::SHUTDOWN )
{
@ -557,6 +570,12 @@ void LifeCycleManager::trigger_shutdown_success(int vid)
{
Quotas::quota_del(Quotas::VM, uid, gid, &quota_tmpl);
}
if (state != VirtualMachine::SAVE_MIGRATE)
{
auto planm = Nebula::instance().get_planm();
planm->action_success(cid, vid);
}
});
}
@ -601,6 +620,13 @@ void LifeCycleManager::trigger_shutdown_failure(int vid)
{
vm->log("LCM", Log::ERROR, "shutdown_failure_action, VM in a wrong state");
}
int cid = vm->get_cid();
vm.reset();
auto planm = Nebula::instance().get_planm();
planm->action_failure(cid, vid);
});
}
@ -2246,7 +2272,8 @@ void LifeCycleManager::trigger_disk_lock_success(int vid)
{
case Image::USED:
case Image::USED_PERS:
ready.push_back(make_tuple(id, image->get_source(), image->get_format()));
ready.push_back(make_tuple(id, image->get_source(),
image->get_format()));
break;
case Image::ERROR:
@ -2266,6 +2293,7 @@ void LifeCycleManager::trigger_disk_lock_success(int vid)
}
}
bool do_place = false;
for (const auto& rit : ready)
{
@ -2287,6 +2315,9 @@ void LifeCycleManager::trigger_disk_lock_success(int vid)
// Automatic requirements are not recalculated on purpose
vm->set_state(VirtualMachine::PENDING);
do_place = true;
}
}
else if (error.size() > 0)
@ -2299,6 +2330,11 @@ void LifeCycleManager::trigger_disk_lock_success(int vid)
}
vmpool->update(vm.get());
if ( do_place )
{
Nebula::instance().get_sm()->trigger_place();
}
});
}

View File

@ -25,6 +25,14 @@ module DriverExecHelper
:failure => 'FAILURE'
}
# Maps log severity labels to the single-character log levels used by oned
SEVERITY_MAP = {
'ERROR' => 'E',
'INFO' => 'I',
'WARN' => 'W',
'DEBUG' => 'D'
}
def self.failed?(rc_str)
rc_str == RESULT[:failure]
end
@ -118,45 +126,83 @@ module DriverExecHelper
# Sends a log message to ONE. The +message+ can be multiline; it will
# be automatically split into lines.
def log(number, message, all = true)
def log(number, message, all = true, options = {})
msg = message.strip
msg.each_line do |line|
all ? severity='I' : severity=nil
m = line.match(/^(ERROR|DEBUG|INFO):(.*)$/)
if line.match(/^(ERROR|DEBUG|INFO):(.*)$/)
line=Regexp.last_match(2)
case Regexp.last_match(1)
when 'ERROR'
severity='E'
when 'DEBUG'
severity='D'
when 'INFO'
severity='I'
end
if m
severity = SEVERITY_MAP[m[1]]
line = m[2]
elsif all
severity = 'I'
else
severity = nil
end
send_message('LOG', severity, number, line.strip) if severity
line.strip!
line = Zlib::Deflate.deflate(line, Zlib::BEST_COMPRESSION) if options[:compress]
line = Base64.strict_encode64(line) if options[:encode]
send_message('LOG', severity, number, line) if severity
end
end
# Generates a proc that calls log with a hardcoded number. It will
# be used to add logging to command actions
def log_method(num)
def log_method(num, options = {})
lambda {|message, all = true|
log(num, message, all)
log(num, message, all, options)
}
end
# This method returns the result in terms
def get_info_from_execution(command_exe)
# This method processes the command output:
# - Log messages written to STDERR are sent to oned as LOG commands
# - In case of error, non-log lines in STDERR are merged and returned as info
# - In case of success, STDOUT is returned as info
#
# TODO: Review calls to this method to encode/zip info as with LOG messages
def get_info_from_execution(command_exe, opts = {})
o = {
:compress => false,
:encode => false
}.merge(opts)
err_info = ''
command_exe.stderr.each_line do |line|
line.strip!
m = line.match(/^(ERROR|INFO|WARN|DEBUG):\s+(\d+)?\s*(.*)$/)
if !m
err_info << line
next
end
sev = SEVERITY_MAP[m[1]]
id = begin
Integer(m[2])
rescue StandardError
'-1'
end
msg = m[3].strip
msg = Zlib::Deflate.deflate(msg, Zlib::BEST_COMPRESSION) if o[:compress]
msg = Base64.strict_encode64(msg) if o[:encode]
send_message('LOG', sev, id, msg)
end
if command_exe.code == 0
result = RESULT[:success]
info = command_exe.stdout
else
result = RESULT[:failure]
info = command_exe.stderr.to_s.tr("\n", ' ').strip
info = err_info.tr("\n", ' ').strip
end
info = '-' if info.empty?
@ -164,7 +210,6 @@ module DriverExecHelper
[result, info]
end
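
To make the STDERR convention handled above concrete, here is a standalone Ruby sketch of the classification; the sample lines are made up, and the real method additionally honors the :compress and :encode options:

    sev_map = { 'ERROR' => 'E', 'INFO' => 'I', 'WARN' => 'W', 'DEBUG' => 'D' }

    stderr = <<~ERR
      ERROR: 42 could not connect to host
      INFO: 42 retrying the operation
      connection refused
    ERR

    info = ''

    stderr.each_line do |line|
      line = line.strip
      m = line.match(/^(ERROR|INFO|WARN|DEBUG):\s+(\d+)?\s*(.*)$/)

      if m                       # tagged lines become LOG messages for oned
        puts "LOG #{sev_map[m[1]]} #{m[2] || '-1'} #{m[3].strip}"
      else                       # anything else is merged into the failure info
        info << line
      end
    end

    puts "failure info: #{info}"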
#
#
# Simple parser for the config file generated by OpenNebula
def read_configuration

View File

@ -100,7 +100,7 @@ class OpenNebulaDriver < ActionManager
# if options[:is_local] is not specified (nil)
# we rely uniquely in actions_is_local?
if action_is_local?(aname) or options[:is_local]
if action_is_local?(aname) || options[:is_local]
stdin = Base64.strict_encode64(options[:stdin].to_s)
execution = LocalCommand.run(command,
log_method(id),

View File

@ -33,6 +33,7 @@
#include "ImagePool.h"
#include "MarketPlacePool.h"
#include "MarketPlaceAppPool.h"
#include "PlanPool.h"
#include "ScheduledActionPool.h"
#include "SecurityGroupPool.h"
#include "UserPool.h"
@ -56,9 +57,11 @@
#include "IPAMManager.h"
#include "LifeCycleManager.h"
#include "MarketPlaceManager.h"
#include "PlanManager.h"
#include "RaftManager.h"
#include "RequestManager.h"
#include "ScheduledActionManager.h"
#include "SchedulerManager.h"
#include "TransferManager.h"
#include "VirtualMachineManager.h"
@ -96,6 +99,10 @@ Nebula::~Nebula()
if (!cache)
{
if (planm) planm->finalize();
if (sm) sm->finalize();
if (sam) sam->finalize();
if (vmm) vmm->finalize();
@ -180,6 +187,8 @@ Nebula::~Nebula()
delete raftm;
delete frm;
delete sam;
delete sm;
delete planm;
delete logdb;
delete fed_logdb;
delete system_db;
@ -187,6 +196,7 @@ Nebula::~Nebula()
delete hkpool;
delete bjpool;
delete sapool;
delete plpool;
};
/* -------------------------------------------------------------------------- */
@ -583,6 +593,7 @@ void Nebula::start(bool bootstrap_only)
rc += HookLog::bootstrap(logdb);
rc += BackupJobPool::bootstrap(logdb);
rc += ScheduledActionPool::bootstrap(logdb);
rc += PlanPool::bootstrap(logdb);
// Create the system tables only if bootstrap went well
if (rc == 0)
@ -886,6 +897,8 @@ void Nebula::start(bool bootstrap_only)
sapool = new ScheduledActionPool(logdb);
plpool = new PlanPool(logdb);
default_user_quota.select();
default_group_quota.select();
@ -1186,6 +1199,55 @@ void Nebula::start(bool bootstrap_only)
sam = new ScheduledActionManager(timer_period, max_backups, max_backups_host);
}
// ---- Scheduler Manager ----
if (!cache)
{
time_t wnd_time;
unsigned int wnd_length;
time_t retry;
nebula_configuration->get("SCHED_MAX_WND_TIME", wnd_time);
nebula_configuration->get("SCHED_MAX_WND_LENGTH", wnd_length);
nebula_configuration->get("SCHED_RETRY_TIME", retry);
sm = new SchedulerManager(wnd_time, wnd_length, retry, mad_location);
vector<const VectorAttribute *> sched_mads;
nebula_configuration->get("SCHED_MAD", sched_mads);
if (sm->load_drivers(sched_mads) != 0)
{
goto error_mad;
}
rc = sm->start();
if ( rc != 0 )
{
throw runtime_error("Could not start the Scheduler Manager");
}
}
// ---- Plan Manager ----
if (!cache)
{
int max_actions_per_host;
int max_actions_per_cluster;
int live_rescheds;
int cold_migrate_mode;
int timeout;
nebula_configuration->get("MAX_ACTIONS_PER_HOST", max_actions_per_host);
nebula_configuration->get("MAX_ACTIONS_PER_CLUSTER", max_actions_per_cluster);
nebula_configuration->get("LIVE_RESCHEDS", live_rescheds);
nebula_configuration->get("COLD_MIGRATE_MODE", cold_migrate_mode);
nebula_configuration->get("ACTION_TIMEOUT", timeout);
planm = new PlanManager(timer_period, max_actions_per_host, max_actions_per_cluster,
live_rescheds, cold_migrate_mode, timeout);
}
// -----------------------------------------------------------
// Load mads
// -----------------------------------------------------------

View File

@ -40,6 +40,7 @@ env.Prepend(LIBS=[
'nebula_dm',
'nebula_tm',
'nebula_um',
'nebula_schedm',
'nebula_datastore',
'nebula_group',
'nebula_authm',

View File

@ -34,9 +34,14 @@ module OpenNebula
:addvnet => "cluster.addvnet",
:delvnet => "cluster.delvnet",
:update => "cluster.update",
:rename => "cluster.rename"
:rename => "cluster.rename",
:optimize => "cluster.optimize",
:planexecute => "cluster.planexecute",
:plandelete => "cluster.plandelete"
}
PLAN_STATE = ['READY', 'APPLYING', 'DONE', 'ERROR', 'TIMEOUT']
# Creates a Cluster description with just its identifier
# this method should be used to create plain Cluster objects.
# +id+ the id of the cluster
@ -182,6 +187,30 @@ module OpenNebula
return call(CLUSTER_METHODS[:rename], @pe_id, name)
end
# Create optimization plan for the Cluster
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def optimize()
return call(CLUSTER_METHODS[:optimize], @pe_id)
end
# Start applying the optimization plan for the Cluster
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def plan_execute()
return call(CLUSTER_METHODS[:planexecute], @pe_id)
end
# Delete optimization plan for the Cluster
#
# @return [nil, OpenNebula::Error] nil in case of success, Error
# otherwise
def plan_delete()
return call(CLUSTER_METHODS[:plandelete], @pe_id)
end
# ---------------------------------------------------------------------
# Helpers to get information
# ---------------------------------------------------------------------
@ -243,6 +272,20 @@ module OpenNebula
return array
end
# Returns state of optimization plan
# @return [Integer] -1 if no plan
def plan_state
state = self['PLAN/STATE'] || -1
state.to_i
end
# Returns an array of plan actions
# @return [Array<PlanAction>]
def plan_actions
[self.to_hash['CLUSTER']['PLAN']['ACTION']].flatten
end
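
Putting the new calls together, a Ruby client could drive a full optimization cycle as sketched below. Endpoint, credentials and the cluster ID are illustrative, and note that the plan is computed asynchronously, so the PLAN section may take a moment to appear after optimize:

    require 'opennebula'

    client  = OpenNebula::Client.new('oneadmin:password', 'http://localhost:2633/RPC2')
    cluster = OpenNebula::Cluster.new_with_id(100, client)

    rc = cluster.optimize                 # one.cluster.optimize
    raise rc.message if OpenNebula.is_error?(rc)

    cluster.info                          # refresh to pick up the PLAN section

    if cluster.plan_state != -1
      cluster.plan_actions.each do |action|
        puts "VM #{action['VM_ID']}: #{action['OPERATION']} -> host #{action['HOST_ID']}"
      end

      cluster.plan_execute                # one.cluster.planexecute
    end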
private
def contains_resource?(xpath, id)

View File

@ -136,6 +136,13 @@ class OneDBBacKEnd
"search_token MEDIUMTEXT",
acl: "oid INT PRIMARY KEY, userset BIGINT, resource BIGINT, " <<
"rights BIGINT, zone BIGINT, UNIQUE(userset, resource, rights, zone)"
},
"7.0.0" => {
vm_pool: "oid INTEGER PRIMARY KEY, name VARCHAR(128), " <<
"body MEDIUMTEXT, uid INTEGER, gid INTEGER, " <<
"state INTEGER, lcm_state INTEGER, resched INTEGER," <<
"owner_u INTEGER, group_u INTEGER, other_u INTEGER, short_body MEDIUMTEXT, " <<
"body_json JSON"
}
}

View File

@ -167,3 +167,14 @@ const EString<HookManagerMessages> hook_msg_t::_type_str(
{"RETRY", HookManagerMessages::RETRY},
{"LOG", HookManagerMessages::LOG},
});
template<>
const EString<SchedulerManagerMessages> scheduler_msg_t::_type_str(
{
{"UNDEFINED", SchedulerManagerMessages::UNDEFINED},
{"INIT", SchedulerManagerMessages::INIT},
{"FINALIZE", SchedulerManagerMessages::FINALIZE},
{"PLACE", SchedulerManagerMessages::PLACE},
{"OPTIMIZE", SchedulerManagerMessages::OPTIMIZE},
{"LOG", SchedulerManagerMessages::LOG},
});

View File

@ -531,6 +531,9 @@ void RequestManager::register_xml_methods()
xmlrpc_c::methodPtr cluster_delds(new ClusterDelDatastore());
xmlrpc_c::methodPtr cluster_addvnet(new ClusterAddVNet());
xmlrpc_c::methodPtr cluster_delvnet(new ClusterDelVNet());
xmlrpc_c::methodPtr cluster_optimize(new ClusterOptimize());
xmlrpc_c::methodPtr cluster_planexecute(new ClusterPlanExecute());
xmlrpc_c::methodPtr cluster_plandelete(new ClusterPlanDelete());
// System Methods
xmlrpc_c::methodPtr system_version(new SystemVersion());
@ -893,6 +896,10 @@ void RequestManager::register_xml_methods()
RequestManagerRegistry.addMethod("one.cluster.addvnet", cluster_addvnet);
RequestManagerRegistry.addMethod("one.cluster.delvnet", cluster_delvnet);
RequestManagerRegistry.addMethod("one.cluster.optimize", cluster_optimize);
RequestManagerRegistry.addMethod("one.cluster.planexecute", cluster_planexecute);
RequestManagerRegistry.addMethod("one.cluster.plandelete", cluster_plandelete);
RequestManagerRegistry.addMethod("one.clusterpool.info", clusterpool_info);
/* Generic Document objects related methods*/

View File

@ -16,7 +16,10 @@
#include "RequestManagerCluster.h"
#include "HostPool.h"
#include "PlanPool.h"
#include "VirtualMachinePool.h"
#include "PlanManager.h"
#include "SchedulerManager.h"
using namespace std;
@ -299,7 +302,6 @@ void RequestManagerClusterHost::add_generic(
if ( clpool->del_from_cluster(PoolObjectSQL::HOST, cluster.get(), host_id, att.resp_msg) < 0 )
{
failure_response(INTERNAL, att);
return;
}
@ -350,3 +352,110 @@ void RequestManagerClusterHost::add_generic(
return;
}
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
void ClusterOptimize::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
{
int cluster_id = paramList.getInt(1);
if ( clpool->exist(cluster_id) == -1 )
{
att.resp_obj = PoolObjectSQL::CLUSTER;
att.resp_id = cluster_id;
failure_response(NO_EXISTS, att);
return;
}
auto plan = plpool->get_ro(cluster_id);
if (plan->state() == PlanState::APPLYING)
{
att.resp_msg = "Can't optimize cluster. A previous plan is currently being applied.";
failure_response(ACTION, att);
return;
}
Nebula::instance().get_sm()->trigger_optimize(cluster_id);
success_response(cluster_id, att);
}
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
void ClusterPlanExecute::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
{
string error_msg;
int cluster_id = paramList.getInt(1);
if ( clpool->exist(cluster_id) == -1 )
{
att.resp_obj = PoolObjectSQL::CLUSTER;
att.resp_id = cluster_id;
failure_response(NO_EXISTS, att);
return;
}
Nebula& nd = Nebula::instance();
auto planm = nd.get_planm();
if (planm->start_plan(cluster_id, error_msg) != 0)
{
att.resp_msg = error_msg;
failure_response(ACTION, att);
return;
}
success_response(cluster_id, att);
}
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
void ClusterPlanDelete::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
{
int cluster_id = paramList.getInt(1);
auto cluster = clpool->get(cluster_id);
if (!cluster)
{
att.resp_obj = PoolObjectSQL::CLUSTER;
att.resp_id = cluster_id;
failure_response(NO_EXISTS, att);
return;
}
auto plan = plpool->get(cluster_id);
if (plan->state() == PlanState::NONE)
{
att.resp_msg = "Plan for cluster " + to_string(cluster_id) + "does not exist";
failure_response(ACTION, att);
return;
}
plan->clear();
if (plpool->drop(plan.get()) != 0)
{
att.resp_msg = "Unable to delete plan for cluster " + to_string(cluster_id);
failure_response(ACTION, att);
return;
}
success_response(cluster_id, att);
}

View File

@ -70,7 +70,7 @@ void RequestManagerInfo::request_execute(xmlrpc_c::paramList const& paramList,
object->decrypt();
}
load_monitoring(object.get());
load_extended_data(object.get());
to_xml(att, object.get(), str);

View File

@ -191,7 +191,7 @@ bool RequestManagerVirtualMachine::quota_resize_authorization(
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int RequestManagerVirtualMachine::get_default_ds_information(
Request::ErrorCode RequestManagerVirtualMachine::get_default_ds_information(
int cluster_id,
int& ds_id,
string& tm_mad,
@ -217,9 +217,8 @@ int RequestManagerVirtualMachine::get_default_ds_information(
{
att.resp_obj = PoolObjectSQL::CLUSTER;
att.resp_id = cluster_id;
failure_response(NO_EXISTS, att);
return -1;
return NO_EXISTS;
}
if (ds_id == -1)
@ -233,9 +232,7 @@ int RequestManagerVirtualMachine::get_default_ds_information(
att.resp_msg = oss.str();
failure_response(ACTION, att);
return -1;
return ACTION;
}
set<int> ds_cluster_ids;
@ -246,7 +243,7 @@ int RequestManagerVirtualMachine::get_default_ds_information(
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int RequestManagerVirtualMachine::get_ds_information(int ds_id,
Request::ErrorCode RequestManagerVirtualMachine::get_ds_information(int ds_id,
set<int>& ds_cluster_ids,
string& tm_mad,
RequestAttributes& att,
@ -262,8 +259,8 @@ int RequestManagerVirtualMachine::get_ds_information(int ds_id,
{
att.resp_obj = PoolObjectSQL::DATASTORE;
att.resp_id = ds_id;
failure_response(NO_EXISTS, att);
return -1;
return NO_EXISTS;
}
if ( ds->get_type() != Datastore::SYSTEM_DS )
@ -276,9 +273,7 @@ int RequestManagerVirtualMachine::get_ds_information(int ds_id,
att.resp_msg = oss.str();
failure_response(INTERNAL, att);
return -1;
return INTERNAL;
}
ds_cluster_ids = ds->get_cluster_ids();
@ -287,14 +282,14 @@ int RequestManagerVirtualMachine::get_ds_information(int ds_id,
ds->get_template_attribute("DS_MIGRATE", ds_migr);
return 0;
return SUCCESS;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int RequestManagerVirtualMachine::get_host_information(
Request::ErrorCode RequestManagerVirtualMachine::get_host_information(
int hid,
string& name,
string& vmm,
@ -314,16 +309,14 @@ int RequestManagerVirtualMachine::get_host_information(
{
att.resp_obj = PoolObjectSQL::HOST;
att.resp_id = hid;
failure_response(NO_EXISTS, att);
return -1;
return NO_EXISTS;
}
if ( host->get_state() == Host::OFFLINE )
{
att.resp_msg = "Host is offline, cannot use it to deploy VM";
failure_response(ACTION, att);
return -1;
return ACTION;
}
name = host->get_name();
@ -335,7 +328,7 @@ int RequestManagerVirtualMachine::get_host_information(
host->get_permissions(host_perms);
return 0;
return SUCCESS;
}
/* -------------------------------------------------------------------------- */
@ -432,7 +425,6 @@ int RequestManagerVirtualMachine::add_history(VirtualMachine * vm,
if ( vmpool->insert_history(vm) != 0 )
{
att.resp_msg = "Cannot update virtual machine history";
failure_response(INTERNAL, att);
return -1;
}
@ -440,7 +432,6 @@ int RequestManagerVirtualMachine::add_history(VirtualMachine * vm,
if ( vmpool->update(vm) != 0 )
{
att.resp_msg = "Cannot update virtual machine";
failure_response(INTERNAL, att);
return -1;
}
@ -738,8 +729,12 @@ static int set_migrate_vnc_port(VirtualMachine *vm, int cluster_id, bool keep)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
Request::ErrorCode VirtualMachineDeploy::request_execute(RequestAttributes& att,
int vid,
int hid,
bool enforce,
int ds_id,
const string& str_tmpl)
{
Nebula& nd = Nebula::instance();
DatastorePool * dspool = nd.get_dspool();
@ -759,59 +754,36 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
string tm_mad;
bool auth = false;
bool check_nic_auto = false;
// ------------------------------------------------------------------------
// Get request parameters and information about the target host
// ------------------------------------------------------------------------
int id = xmlrpc_c::value_int(paramList.getInt(1));
int hid = xmlrpc_c::value_int(paramList.getInt(2));
bool enforce = false;
int ds_id = -1;
if ( paramList.size() > 3 )
if (!str_tmpl.empty())
{
enforce = xmlrpc_c::value_boolean(paramList.getBoolean(3));
}
if ( paramList.size() > 4 )
{
ds_id = xmlrpc_c::value_int(paramList.getInt(4));
}
if ( paramList.size() > 5 ) // Template with network scheduling results
{
std::string str_tmpl = xmlrpc_c::value_string(paramList.getString(5));
check_nic_auto = !str_tmpl.empty();
int rc = tmpl.parse_str_or_xml(str_tmpl, att.resp_msg);
if ( rc != 0 )
{
failure_response(INTERNAL, att);
return;
return INTERNAL;
}
}
if (get_host_information(hid,
hostname,
vmm_mad,
cluster_id,
is_public_cloud,
host_perms,
att) != 0)
auto ec = get_host_information(hid,
hostname,
vmm_mad,
cluster_id,
is_public_cloud,
host_perms,
att);
if (ec != SUCCESS)
{
return;
return ec;
}
// ------------------------------------------------------------------------
// Get information about the system DS to use (tm_mad & permissions)
// ------------------------------------------------------------------------
if ( auto vm = get_vm_ro(id, att) )
if (auto vm = pool->get_ro<VirtualMachine>(vid))
{
if (vm->hasHistory() &&
(vm->get_action() == VMActions::STOP_ACTION ||
@ -848,7 +820,9 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
}
else
{
return;
att.resp_id = vid;
return NO_EXISTS;
}
if (is_public_cloud) // Set ds_id to -1 and tm_mad empty(). This is used by
@ -861,9 +835,10 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
{
if ( ds_id == -1 ) //Use default system DS for cluster
{
if (get_default_ds_information(cluster_id, ds_id, tm_mad, att) != 0)
ec = get_default_ds_information(cluster_id, ds_id, tm_mad, att);
if (ec != SUCCESS)
{
return;
return ec;
}
}
else //Get information from user selected system DS
@ -871,9 +846,10 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
set<int> ds_cluster_ids;
bool ds_migr;
if (get_ds_information(ds_id, ds_cluster_ids, tm_mad, att, ds_migr) != 0)
ec = get_ds_information(ds_id, ds_cluster_ids, tm_mad, att, ds_migr);
if (ec != SUCCESS)
{
return;
return ec;
}
if (ds_cluster_ids.count(cluster_id) == 0)
@ -888,9 +864,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
att.resp_msg = oss.str();
failure_response(ACTION, att);
return;
return ACTION;
}
}
}
@ -907,9 +881,8 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
{
att.resp_obj = PoolObjectSQL::DATASTORE;
att.resp_id = ds_id;
failure_response(NO_EXISTS, att);
return;
return NO_EXISTS;
}
ds->get_permissions(ds_perms);
@ -932,27 +905,25 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
{
att.resp_msg = "NIC includes a restricted attribute " + aname;
failure_response(AUTHORIZATION, att);
return;
return AUTHORIZATION;
}
}
if (!quota_authorization(&tmpl, Quotas::NETWORK, att_quota, att.resp_msg))
{
failure_response(AUTHORIZATION, att);
return;
return AUTHORIZATION;
}
auth = vm_authorization(id, 0, &tmpl, att, &host_perms, auth_ds_perms, 0);
ec = vm_authorization_no_response(vid, 0, &tmpl, att, &host_perms, auth_ds_perms, 0);
}
else
{
auth = vm_authorization(id, 0, 0, att, &host_perms, auth_ds_perms, 0);
ec = vm_authorization_no_response(vid, 0, 0, att, &host_perms, auth_ds_perms, 0);
}
if (auth == false)
if (ec != SUCCESS)
{
return;
return ec;
}
// ------------------------------------------------------------------------
@ -960,11 +931,13 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
// - VM States are right
// - Host capacity if required
// ------------------------------------------------------------------------
auto vm = get_vm(id, att);
auto vm = pool->get<VirtualMachine>(vid);
if (vm == nullptr)
{
return;
att.resp_id = vid;
return NO_EXISTS;
}
if (vm->get_state() != VirtualMachine::PENDING &&
@ -975,38 +948,32 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
att.resp_msg = "Deploy action is not available for state " +
vm->state_str();
failure_response(ACTION, att);
return;
return ACTION;
}
if (check_host(hid, enforce || vm->is_pinned(), vm.get(), att.resp_msg) == false)
{
failure_response(ACTION, att);
return;
return ACTION;
}
if ( check_nic_auto && vm->get_auto_network_leases(&tmpl, att.resp_msg) != 0 )
{
failure_response(ACTION, att);
return;
return ACTION;
}
if ( vm->check_tm_mad_disks(tm_mad, att.resp_msg) != 0)
{
failure_response(ACTION, att);
return;
return ACTION;
}
if ( vm->check_shareable_disks(vmm_mad, att.resp_msg) != 0)
{
failure_response(ACTION, att);
return;
return ACTION;
}
if ( nd.get_vmm()->validate_template(vmm_mad, vm.get(), hid, cluster_id, att.resp_msg) != 0 )
{
failure_response(ACTION, att);
return;
return ACTION;
}
RequestAttributes att_quota(vm_perms.uid, vm_perms.gid, att);
@ -1023,8 +990,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
{
att.resp_msg = att_quota.resp_msg;
failure_response(AUTHORIZATION, att);
return;
return AUTHORIZATION;
}
}
@ -1044,7 +1010,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
quota_rollback(&quota_tmpl_running, Quotas::VM, att_quota);
}
return;
return AUTHORIZATION;
}
}
else if (old_cid != cluster_id)
@ -1059,7 +1025,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
quota_rollback(&quota_tmpl_running, Quotas::VM, att_quota);
}
return;
return AUTHORIZATION;
}
// Remove resources from old cluster
@ -1097,7 +1063,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
}
failure_response(ACTION, att);
return;
return ACTION;
}
// ------------------------------------------------------------------------
@ -1112,7 +1078,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
ds_id,
att) != 0)
{
return;
return INTERNAL;
}
// ------------------------------------------------------------------------
@ -1121,14 +1087,61 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
dm->deploy(std::move(vm), att);
return SUCCESS;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
{
// ------------------------------------------------------------------------
// Get request parameters and information about the target host
// ------------------------------------------------------------------------
int id = xmlrpc_c::value_int(paramList.getInt(1));
int hid = xmlrpc_c::value_int(paramList.getInt(2));
bool enforce = false;
int ds_id = -1;
std::string str_tmpl;
if ( paramList.size() > 3 )
{
enforce = xmlrpc_c::value_boolean(paramList.getBoolean(3));
}
if ( paramList.size() > 4 )
{
ds_id = xmlrpc_c::value_int(paramList.getInt(4));
}
if ( paramList.size() > 5 ) // Template with network scheduling results
{
str_tmpl = xmlrpc_c::value_string(paramList.getString(5));
}
auto ec = request_execute(att, id, hid, enforce, ds_id, str_tmpl);
if (ec != SUCCESS)
{
failure_response(ec, att);
return;
}
success_response(id, att);
}
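
The new ErrorCode overload lets internal components drive a deployment without going through XML-RPC marshalling. A minimal sketch, mirroring how PlanManager::start_action() below invokes it (VM and host IDs are illustrative):

    RequestAttributes ra(AuthRequest::ADMIN,
                         UserPool::ONEADMIN_ID,
                         GroupPool::ONEADMIN_ID,
                         PoolObjectSQL::VM);

    VirtualMachineDeploy deploy;

    // Deploy VM 42 on host 3, default system DS, no extra template
    Request::ErrorCode ec = deploy.request_execute(ra, 42, 3, false, -1, "");

    if (ec != Request::SUCCESS)
    {
        NebulaLog::error("PLM", Request::failure_message(ec, ra, "deploy", ra.resp_obj));
    }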
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
Request::ErrorCode VirtualMachineMigrate::request_execute(RequestAttributes& att,
int vid,
int hid,
bool live,
bool enforce,
int ds_id,
int poweroff)
{
Nebula& nd = Nebula::instance();
@ -1152,44 +1165,19 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
set<int> cluster_ids;
string error_str;
bool auth = false;
bool ds_migr;
// ------------------------------------------------------------------------
// Get request parameters and information about the target host
// ------------------------------------------------------------------------
auto ec = get_host_information(hid,
hostname,
vmm_mad,
cluster_id,
is_public_cloud,
host_perms,
att);
int id = xmlrpc_c::value_int(paramList.getInt(1));
int hid = xmlrpc_c::value_int(paramList.getInt(2));
bool live = xmlrpc_c::value_boolean(paramList.getBoolean(3));
bool enforce = false;
int ds_id = -1;
int poffmgr = 0;
if ( paramList.size() > 4 )
if (ec != SUCCESS)
{
enforce = xmlrpc_c::value_boolean(paramList.getBoolean(4));
}
if ( paramList.size() > 5 )
{
ds_id = xmlrpc_c::value_int(paramList.getInt(5));
}
if ( paramList.size() > 6 )
{
poffmgr = xmlrpc_c::value_int(paramList.getInt(6));
}
if (get_host_information(hid,
hostname,
vmm_mad,
cluster_id,
is_public_cloud,
host_perms,
att) != 0)
{
return;
return ec;
}
if (ds_id == -1)
@ -1204,9 +1192,8 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
{
att.resp_obj = PoolObjectSQL::DATASTORE;
att.resp_id = ds_id;
failure_response(NO_EXISTS, att);
return;
return NO_EXISTS;
}
ds->get_permissions(ds_perms);
@ -1217,11 +1204,11 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
// ------------------------------------------------------------------------
// Authorize request
// ------------------------------------------------------------------------
auth = vm_authorization(id, 0, 0, att, &host_perms, auth_ds_perms, 0);
ec = vm_authorization_no_response(vid, 0, 0, att, &host_perms, auth_ds_perms, 0);
if (auth == false)
if (ec != SUCCESS)
{
return;
return ec;
}
// ------------------------------------------------------------------------
@ -1233,11 +1220,13 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
// - New host and current one are in the same cluster
// - New or old host are not public cloud
// ------------------------------------------------------------------------
auto vm = get_vm(id, att);
auto vm = pool->get<VirtualMachine>(vid);
if (vm == nullptr)
{
return;
att.resp_id = vid;
return NO_EXISTS;
}
if (vm->is_previous_history_open() ||
@ -1248,17 +1237,15 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
vm->get_lcm_state() != VirtualMachine::UNKNOWN))))
{
att.resp_msg = "Migrate action is not available for state " + vm->state_str();
failure_response(ACTION, att);
return;
return ACTION;
}
if (live && vm->is_pinned())
{
att.resp_msg = "VM with a pinned NUMA topology cannot be live-migrated";
failure_response(ACTION, att);
return;
return ACTION;
}
// Get System DS information from current History record
@ -1275,16 +1262,14 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
oss << "VM is already running on host [" << c_hid << "] and datastore [" << c_ds_id << "]";
att.resp_msg = oss.str();
failure_response(ACTION, att);
return;
return ACTION;
}
// Check the host has enough capacity
if (check_host(hid, enforce, vm.get(), att.resp_msg) == false)
{
failure_response(ACTION, att);
return;
return ACTION;
}
int rc = vm->automatic_requirements(cluster_ids, error_str);
@ -1292,8 +1277,8 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
if (rc != 0)
{
att.resp_msg = error_str;
failure_response(ACTION, att);
return;
return ACTION;
}
//Check PCI devices are compatible with migration type
@ -1301,18 +1286,17 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
vm->get_capacity(sr);
if ((sr.pci.size() > 0) && (!poffmgr &&
if ((sr.pci.size() > 0) && (!poweroff &&
vm->get_state() != VirtualMachine::POWEROFF))
{
ostringstream oss;
oss << "Cannot migrate VM [" << id << "], use poweroff or poweroff-hard"
oss << "Cannot migrate VM [" << vid << "], use poweroff or poweroff-hard"
" flag for migrating a VM with PCI devices";
att.resp_msg = oss.str();
failure_response(ACTION, att);
return;
return ACTION;
}
// Check we are migrating to a compatible cluster
@ -1325,31 +1309,28 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
{
att.resp_obj = PoolObjectSQL::HOST;
att.resp_id = c_hid;
failure_response(NO_EXISTS, att);
return;
return NO_EXISTS;
}
if (!cluster_ids.empty() && cluster_ids.count(cluster_id) == 0)
{
ostringstream oss;
oss << "Cannot migrate VM [" << id << "] to host [" << hid << "]. Host is in cluster ["
oss << "Cannot migrate VM [" << vid << "] to host [" << hid << "]. Host is in cluster ["
<< cluster_id << "], and VM requires to be placed on cluster ["
<< one_util::join(cluster_ids, ',') << "]";
att.resp_msg = oss.str();
failure_response(ACTION, att);
return;
return ACTION;
}
if ( is_public_cloud || c_is_public_cloud )
{
att.resp_msg = "Cannot migrate to or from a Public Cloud Host";
failure_response(ACTION, att);
return;
return ACTION;
}
if (ds_id != -1)
@ -1360,48 +1341,46 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
if ( vmmd == nullptr )
{
att.resp_msg = "Cannot find vmm driver: " + vmm_mad;
failure_response(ACTION, att);
return;
return ACTION;
}
if ( c_ds_id != ds_id && live && !vmmd->is_ds_live_migration())
{
att.resp_msg = "A migration to a different system datastore "
"cannot be performed live.";
failure_response(ACTION, att);
return;
return ACTION;
}
if (get_ds_information(ds_id, ds_cluster_ids, tm_mad, att, ds_migr) != 0)
ec = get_ds_information(ds_id, ds_cluster_ids, tm_mad, att, ds_migr);
if (ec != SUCCESS)
{
return;
return ec;
}
if (!ds_migr)
{
att.resp_msg = "System datastore migration not supported by TM driver";
failure_response(ACTION, att);
return;
return ACTION;
}
if (c_tm_mad != tm_mad)
{
att.resp_msg = "Cannot migrate to a system datastore with a different TM driver";
failure_response(ACTION, att);
return;
return ACTION;
}
}
else
{
ds_id = c_ds_id;
if (get_ds_information(ds_id, ds_cluster_ids, tm_mad, att, ds_migr) != 0)
ec = get_ds_information(ds_id, ds_cluster_ids, tm_mad, att, ds_migr);
if (ec != SUCCESS)
{
return;
return ec;
}
}
@ -1409,15 +1388,14 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
{
ostringstream oss;
oss << "Cannot migrate VM [" << id << "] to host [" << hid
oss << "Cannot migrate VM [" << vid << "] to host [" << hid
<< "] and system datastore [" << ds_id << "]. Host is in cluster ["
<< cluster_id << "], and the datastore is in cluster ["
<< one_util::join(ds_cluster_ids, ',') << "]";
att.resp_msg = oss.str();
failure_response(ACTION, att);
return;
return ACTION;
}
// -------------------------------------------------------------------------
@ -1431,9 +1409,8 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
if ( set_migrate_vnc_port(vm.get(), cluster_id, live) == -1 )
{
att.resp_msg = "No free VNC port available in the new cluster";
failure_response(ACTION, att);
return;
return ACTION;
}
// Check cluster quotas on new cluster, remove resources from old cluster
@ -1445,7 +1422,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
if ( !quota_authorization(&quota_tmpl, Quotas::VM, att_quota))
{
att.resp_msg = att_quota.resp_msg;
return;
return AUTHORIZATION;
}
quota_tmpl.replace("CLUSTER_ID", c_cluster_id);
@ -1478,8 +1455,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
Quotas::vm_del(vm_perms.uid, vm_perms.gid, &quota_tmpl);
}
// failure_response set in add_history
return;
return INTERNAL;
}
// ------------------------------------------------------------------------
@ -1491,7 +1467,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
}
else
{
rc = dm->migrate(vm.get(), poffmgr, att);
rc = dm->migrate(vm.get(), poweroff, att);
}
if (rc != 0)
@ -1506,7 +1482,51 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
Quotas::vm_del(vm_perms.uid, vm_perms.gid, &quota_tmpl);
}
failure_response(Request::INTERNAL, att);
return INTERNAL;
}
return SUCCESS;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList,
RequestAttributes& att)
{
// ------------------------------------------------------------------------
// Get request parameters and information about the target host
// ------------------------------------------------------------------------
int id = xmlrpc_c::value_int(paramList.getInt(1));
int hid = xmlrpc_c::value_int(paramList.getInt(2));
bool live = xmlrpc_c::value_boolean(paramList.getBoolean(3));
bool enforce = false;
int ds_id = -1;
int poffmgr = 0;
if ( paramList.size() > 4 )
{
enforce = xmlrpc_c::value_boolean(paramList.getBoolean(4));
}
if ( paramList.size() > 5 )
{
ds_id = xmlrpc_c::value_int(paramList.getInt(5));
}
if ( paramList.size() > 6 )
{
poffmgr = xmlrpc_c::value_int(paramList.getInt(6));
}
auto ec = request_execute(att, id, hid, live, enforce, ds_id, poffmgr);
if (ec != SUCCESS)
{
failure_response(ec, att);
return;
}
success_response(id, att);

348
src/schedm/Plan.cc Normal file

@ -0,0 +1,348 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "Plan.h"
#include "NebulaLog.h"
#include "OneDB.h"
using namespace std;
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int PlanAction::from_xml_node(const xmlNodePtr node)
{
Template tmpl;
int tmp_int = 0;
tmpl.from_xml_node(node);
auto rc = tmpl.get("VM_ID", _vm_id);
rc &= tmpl.get("OPERATION", _operation);
rc &= tmpl.get("HOST_ID", _host_id);
rc &= tmpl.get("DS_ID", _ds_id);
tmpl.get("TIMESTAMP", _timestamp);
tmpl.get("STATE", tmp_int);
_state = static_cast<PlanState>(tmp_int);
vector<VectorAttribute *> nics;
tmpl.get("NIC", nics);
for (auto nic : nics)
{
int nic_id = -1;
int network_id = -1;
nic->vector_value("NIC_ID", nic_id);
nic->vector_value("NETWORK_ID", network_id);
_nics.emplace_back(nic_id, network_id);
}
if (!rc)
{
NebulaLog::error("PLM", "Unable to create PlanAction from xml");
return -1;
}
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
std::string PlanAction::to_xml() const
{
ostringstream oss;
oss << "<ACTION>"
<< "<VM_ID>" << _vm_id << "</VM_ID>"
<< "<STATE>" << _state << "</STATE>"
<< "<OPERATION>" << _operation << "</OPERATION>"
<< "<HOST_ID>" << _host_id << "</HOST_ID>"
<< "<DS_ID>" << _ds_id << "</DS_ID>"
<< "<TIMESTAMP>" << _timestamp << "</TIMESTAMP>";
for (const auto& nic : _nics)
{
oss << "<NIC>"
<< "<NIC_ID>" << nic.first << "</NIC_ID>"
<< "<NETWORK_ID>" << nic.second << "</NETWORK_ID>"
<< "</NIC>";
}
oss << "</ACTION>";
return oss.str();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
PlanAction* Plan::get_next_action()
{
for (auto& action : _actions)
{
if (action.state() == PlanState::READY)
{
return &action;
}
}
return nullptr;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
bool Plan::action_finished(int vid, PlanState state)
{
for (auto& action : _actions)
{
if (action.vm_id() == vid && ( action.state() == PlanState::APPLYING ||
action.state() == PlanState::TIMEOUT))
{
action.state(state);
return true;
}
}
return false;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
string Plan::to_xml() const
{
ostringstream oss;
oss << "<PLAN>"
<< "<ID>" << _cid << "</ID>"
<< "<STATE>" << _state << "</STATE>";
for (const auto& action : _actions)
{
oss << action.to_xml();
}
oss << "</PLAN>";
return oss.str();
}
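
For reference, a plan serialized by the two methods above renders as a single XML document; pretty-printed and with illustrative values it looks like this (STATE holds the integer value of the PlanState enum, and PlanPool::get_active_plans() below selects plans stored with STATE = 1):

    <PLAN>
      <ID>100</ID>
      <STATE>1</STATE>
      <ACTION>
        <VM_ID>42</VM_ID>
        <STATE>0</STATE>
        <OPERATION>migrate</OPERATION>
        <HOST_ID>3</HOST_ID>
        <DS_ID>0</DS_ID>
        <TIMESTAMP>0</TIMESTAMP>
        <NIC>
          <NIC_ID>0</NIC_ID>
          <NETWORK_ID>1</NETWORK_ID>
        </NIC>
      </ACTION>
    </PLAN>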
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Plan::from_xml(const std::string& xml)
{
if (update_from_str(xml) != 0)
{
return -1;
}
return rebuild_attributes();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Plan::check_completed()
{
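// A plan is complete only when no action remains READY or APPLYING; the
// final state is then ERROR if any action failed, TIMEOUT if any action
// timed out, and DONE otherwise.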
auto new_state = PlanState::DONE;
for (const auto& a : _actions)
{
if (a.state() == PlanState::READY ||
a.state() == PlanState::APPLYING)
{
return;
}
if (new_state == PlanState::ERROR)
{
continue;
}
if (a.state() == PlanState::ERROR)
{
new_state = PlanState::ERROR;
}
else if (a.state() == PlanState::TIMEOUT)
{
new_state = PlanState::TIMEOUT;
}
}
// Plan is completed, set new state
_state = new_state;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Plan::timeout_actions(int timeout)
{
auto now = time(nullptr);
for (auto& a : _actions)
{
if (a.state() == PlanState::APPLYING &&
a.timestamp() + timeout < now)
{
a.state(PlanState::TIMEOUT);
}
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Plan::count_actions(int &cluster_actions, std::map<int, int>& host_actions)
{
for (const auto& a : _actions)
{
if (a.state() == PlanState::APPLYING)
{
host_actions[a.host_id()]++;
cluster_actions++;
}
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Plan::rebuild_attributes()
{
int tmp_int;
int rc = 0;
rc += xpath(_cid, "/PLAN/ID", -1);
xpath(tmp_int, "/PLAN/STATE", -1); // State can be empty
_state = static_cast<PlanState>(tmp_int);
vector<xmlNodePtr> action_nodes;
ObjectXML::get_nodes("/PLAN/ACTION", action_nodes);
for (auto node : action_nodes)
{
PlanAction action;
rc += action.from_xml_node(node);
if (rc != 0)
{
break;
}
_actions.emplace_back(action);
}
if (rc != 0)
{
NebulaLog::error("PLM", "Unable to create Plan from xml");
_state = PlanState::ERROR;
return -1;
}
return 0;
}
int Plan::bootstrap(SqlDB * db)
{
std::ostringstream oss;
oss.str(one_db::plan_db_bootstrap);
return db->exec_local_wr(oss);
}
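
one_db::plan_db_bootstrap and one_db::plan_db_names are defined in OneDB.h, outside this diff; judging from select(), insert_replace() and PlanPool::get_active_plans(), the table holds a cluster id key plus state and body columns. A hypothetical sketch of the DDL (plan_pool is a stand-in for the value of one_db::plan_table):

    CREATE TABLE IF NOT EXISTS plan_pool (
        cid   INTEGER PRIMARY KEY,
        state INTEGER,
        body  MEDIUMTEXT
    );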
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Plan::select_cb(void *nil, int num, char **values, char **names)
{
if ( (num != 1) || (!values[0]) )
{
return -1;
}
return from_xml(values[0]);
}
/* -------------------------------------------------------------------------- */
int Plan::select(SqlDB * db)
{
ostringstream oss;
set_callback(static_cast<Callbackable::Callback>(&Plan::select_cb));
oss << "SELECT body FROM " << one_db::plan_table
<< " WHERE cid = " << _cid;
int rc = db->exec_rd(oss, this);
unset_callback();
return rc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Plan::insert_replace(SqlDB *db, bool replace)
{
ostringstream oss;
char * sql_body = db->escape_str(to_xml());
if (!sql_body)
{
return -1;
}
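// NOTE: the replace flag is currently unused; REPLACE INTO covers both the
// insert and the update path, keeping a single row per cluster id.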
oss << "REPLACE INTO " << one_db::plan_table
<< " (" << one_db::plan_db_names << ") VALUES ("
<< _cid << ","
<< _state << ","
<< "'" << sql_body << "')";
int rc = db->exec_wr(oss);
db->free_str(sql_body);
return rc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int Plan::drop(SqlDB * db)
{
ostringstream oss;
oss << "DELETE FROM " << one_db::plan_table << " WHERE cid=" << _cid;
return db->exec_wr(oss);
}

378
src/schedm/PlanManager.cc Normal file

@ -0,0 +1,378 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "PlanManager.h"
#include "Nebula.h"
#include "NebulaLog.h"
#include "RaftManager.h"
#include "RequestManagerVirtualMachine.h"
#include "ClusterPool.h"
#include "PlanPool.h"
#include "Plan.h"
using namespace std;
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
PlanManager::PlanManager(time_t timer,
int _max_actions_per_host,
int _max_actions_per_cluster,
int _live_resched,
int _cold_migrate_mode,
int _timeout)
: timer_thread(timer, [this]() {timer_action();})
, max_actions_per_host(_max_actions_per_host)
, max_actions_per_cluster(_max_actions_per_cluster)
, live_resched(_live_resched == 1)
, cold_migrate_mode(_cold_migrate_mode)
, action_timeout(_timeout)
{
NebulaLog::info("PLM", "Staring Plan Manager...");
auto& nd = Nebula::instance();
cluster_pool = nd.get_clpool();
plan_pool = nd.get_planpool();
vm_pool = nd.get_vmpool();
if (_max_actions_per_host <= 0)
{
max_actions_per_host = 1;
}
if (_max_actions_per_cluster <= 0)
{
max_actions_per_cluster = 1;
}
if (cold_migrate_mode < 0 || cold_migrate_mode > 2)
{
cold_migrate_mode = 0;
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void PlanManager::finalize()
{
NebulaLog::info("PLM", "Stopping Plan Manager...");
timer_thread.stop();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void PlanManager::add_plan(const string& xml)
{
NebulaLog::debug("PLM", "Adding new plan:" + xml);
Plan plan;
if (plan.from_xml(xml) != 0)
{
NebulaLog::error("PLM", "Error parsing plan XML:" + xml);
return;
}
if (plan.actions().empty())
{
NebulaLog::debug("PLM", "Plan has no actions, skipping...");
return;
}
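// A plan with cid == -1 is the global placement plan; any other cid is an
// optimization plan for that cluster.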
if (auto cplan = plan_pool->get(plan.cid()))
{
if (plan.cid() == -1)
{
NebulaLog::info("PLM", "Adding new placement plan");
if (cplan->state() == PlanState::APPLYING)
{
NebulaLog::info("PLM", "Cannot add plan. A placement plan is already in progress.");
return;
}
plan.state(PlanState::APPLYING);
}
else if (auto cluster = cluster_pool->get(plan.cid()))
{
NebulaLog::info("PLM", "Adding new plan for cluster " + to_string(plan.cid()));
if (cplan->state() == PlanState::APPLYING)
{
NebulaLog::info("PLM", "Cannot add cluster optimization plan. A plan is already in progress.");
return;
}
if (cluster->is_autoapply())
{
plan.state(PlanState::APPLYING);
}
else
{
plan.state(PlanState::READY);
}
}
else
{
NebulaLog::error("PLM", "Optimization plan for non-existent cluster " + to_string(plan.cid()));
return;
}
}
plan_pool->update(&plan);
// Clear previous scheduling messages for all VM actions
for (const auto& action : plan.actions())
{
if (auto vm = vm_pool->get(action.vm_id()))
{
string sched_message;
vm->get_user_template_attribute("SCHED_MESSAGE", sched_message);
if (sched_message.empty())
{
continue;
}
vm->set_template_error_message("SCHED_MESSAGE", "");
vm_pool->update(vm.get());
}
}
execute_plans();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
static int start_check_plan(Plan& p, std::string& error)
{
switch(p.state())
{
case PlanState::NONE:
error = "Plan is not ready";
return -1;
case PlanState::APPLYING:
error = "Plan is already applying";
return -1;
case PlanState::DONE:
error = "Plan is already done";
return -1;
case PlanState::ERROR:
error = "Plan is in error state";
return -1;
default:
break;
}
p.state(PlanState::APPLYING);
return 0;
}
/* -------------------------------------------------------------------------- */
int PlanManager::start_plan(int cid, std::string& error)
{
if (auto plan = plan_pool->get(cid))
{
if (start_check_plan(*plan, error) == -1)
{
return -1;
}
plan_pool->update(plan.get());
}
else
{
error = "Plan not found";
return -1;
}
execute_plans();
return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void PlanManager::timer_action()
{
RaftManager * raftm = Nebula::instance().get_raftm();
if (!raftm || (!raftm->is_leader() && !raftm->is_solo()))
{
return;
}
NebulaLog::info("PLM", "Starting Plan Manager timer action...");
execute_plans();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
bool PlanManager::start_action(PlanAction& action)
{
const string& aname = action.operation();
Request::ErrorCode rc = Request::SUCCESS;
RequestAttributes ra(AuthRequest::ADMIN,
UserPool::ONEADMIN_ID,
GroupPool::ONEADMIN_ID,
PoolObjectSQL::VM);
if (aname == "deploy")
{
VirtualMachineDeploy request;
ostringstream extra;
auto nics = action.nics();
for (auto nic : nics)
{
extra << "NIC=[NIC_ID=\"" << nic.first
<< "\", NETWORK_MODE=\"auto\" , NETWORK_ID=\"" << nic.second
<< "\"]";
}
rc = request.request_execute(ra, action.vm_id(), action.host_id(), false,
action.ds_id(), extra.str());
}
else if (aname == "migrate")
{
VirtualMachineMigrate request;
rc = request.request_execute(ra, action.vm_id(), action.host_id(), live_resched,
false, action.ds_id(), cold_migrate_mode);
}
else
{
VirtualMachineAction request;
rc = request.request_execute(ra, aname, action.vm_id());
}
action.timestamp(time(nullptr));
if (rc != Request::SUCCESS)
{
action.state(PlanState::ERROR);
auto error_msg = Request::failure_message(rc, ra, aname, ra.resp_obj);
NebulaLog::info("PLM", error_msg);
if (auto vm = vm_pool->get(action.vm_id()))
{
vm->set_template_error_message(error_msg);
vm_pool->update(vm.get());
}
return false;
}
action.state(PlanState::APPLYING);
return true;
}
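
For a deploy action carrying one automatic NIC, the extra template built above and handed to VirtualMachineDeploy::request_execute() reads (illustrative IDs):

    NIC=[NIC_ID="0", NETWORK_MODE="auto", NETWORK_ID="1"]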
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void PlanManager::action_finished(int cid, int vid, PlanState state)
{
auto plan = plan_pool->get(cid);
if (plan->action_finished(vid, state))
{
plan->check_completed();
plan_pool->update(plan.get());
}
else if (cid != -1)
{
//Not included in cluster plan, it could be in placement plan
action_finished(-1, vid, state);
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void PlanManager::execute_plan(Plan& plan)
{
if (plan.state() != PlanState::APPLYING)
{
NebulaLog::info("PLM", "Plan " + to_string(plan.cid()) + " is not applying");
return;
}
plan.timeout_actions(action_timeout);
// Update counter, num of running actions per host, per cluster
map<int, int> host_actions;
int cluster_actions = 0;
plan.count_actions(cluster_actions, host_actions);
// Execute plan actions
while (auto action = plan.get_next_action())
{
if (host_actions[action->host_id()] >= max_actions_per_host
|| cluster_actions >= max_actions_per_cluster)
{
break;
}
if (start_action(*action))
{
host_actions[action->host_id()]++;
cluster_actions++;
}
}
plan.check_completed();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void PlanManager::execute_plans()
{
auto plans = plan_pool->get_active_plans();
NebulaLog::info("PLM", "Found " + to_string(plans.size()) + " active plans");
for (auto plan_id : plans)
{
auto plan = plan_pool->get(plan_id);
execute_plan(*plan);
plan_pool->update(plan.get());
}
}


@ -14,97 +14,79 @@
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "AclXML.h"
#include "AclRule.h"
#include "ObjectXML.h"
#include <vector>
#include "PlanPool.h"
#include "OneDB.h"
using namespace std;
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int AclXML::set_up()
PlanPool::PlanPool(SqlDB * _db)
: db(_db), table(one_db::plan_table)
{
xmlrpc_c::value result;
try
{
client->call("one.acl.info", "", &result);
vector<xmlrpc_c::value> values =
xmlrpc_c::value_array(result).vectorValueValue();
bool success = xmlrpc_c::value_boolean(values[0]);
string message = xmlrpc_c::value_string(values[1]);
if( !success )
{
ostringstream oss;
oss << "ONE returned error while retrieving the acls:" << endl;
oss << message;
NebulaLog::log("ACL", Log::ERROR, oss);
return -1;
}
flush_rules();
load_rules(message);
return 0;
}
catch (exception const& e)
{
ostringstream oss;
oss << "Exception raised: " << e.what();
NebulaLog::log("ACL", Log::ERROR, oss);
return -1;
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int AclXML::load_rules(const string& xml_str)
std::unique_ptr<Plan> PlanPool::get(int id)
{
ObjectXML acl_xml(xml_str);
vector<xmlNodePtr> rules;
acl_xml.get_nodes("/ACL_POOL/ACL", rules);
for (auto node : rules)
if ( id < -1 )
{
AclRule * rule = new AclRule(0, 0, 0, 0, 0);
int rc = rule->from_xml(node);
if ( rc == 0 )
{
acl_rules.insert( make_pair(rule->get_user(), rule) );
acl_rules_oids.insert( make_pair(rule->get_oid(), rule) );
}
return nullptr;
}
acl_xml.free_nodes(rules);
std::mutex * object_lock = cache.lock_line(id);
return 0;
std::unique_ptr<Plan> plan = std::make_unique<Plan>(id);
plan->ro = false;
plan->_mutex = object_lock;
plan->select(db);
return plan;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void AclXML::flush_rules()
std::unique_ptr<Plan> PlanPool::get_ro(int id)
{
for ( auto it = acl_rules.begin(); it != acl_rules.end(); it++ )
if ( id < -1 )
{
delete it->second;
return nullptr;
}
acl_rules.clear();
acl_rules_oids.clear();
std::unique_ptr<Plan> plan = std::make_unique<Plan>(id);
plan->ro = true;
plan->select(db);
return plan;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
vector<int> PlanPool::get_active_plans() const
{
ostringstream sql;
vector_cb<int> cb;
std::vector<int> plans;
cb.set_callback(&plans);
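// STATE = 1 is assumed to match PlanState::APPLYING, the only state
// execute_plan() acts on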
sql << "SELECT cid FROM " << one_db::plan_table
<< " WHERE STATE = 1";
db->exec_rd(sql, &cb);
cb.unset_callback();
return plans;
}

34
src/schedm/SConstruct Normal file

@ -0,0 +1,34 @@
# SConstruct for src/schedm
# -------------------------------------------------------------------------- #
# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
Import('env')
lib_name='nebula_schedm'
# Sources to generate the library
source_files=[
'SchedulerManager.cc',
'SchedulerManagerDriver.cc',
'Plan.cc',
'PlanManager.cc',
'PlanPool.cc'
]
# Build library
env.StaticLibrary(lib_name, source_files)


@ -0,0 +1,356 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "Nebula.h"
#include "PlanManager.h"
#include "RaftManager.h"
#include "SchedulerManager.h"
#include "VirtualMachinePool.h"
#include <vector>
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
SchedulerManager::SchedulerManager(
time_t _max_wnd_time,
unsigned int _max_wnd_length,
time_t _retry_time,
const std::string& mad_location)
: DriverManager(mad_location)
, Listener("Scheduler Manager")
, wnd_start(0)
, wnd_length(0)
, last_place(time(nullptr))
, max_wnd_time(_max_wnd_time)
, max_wnd_length(_max_wnd_length)
, retry_time(_retry_time)
{
}
int SchedulerManager::start()
{
/**
* Register protocol actions
*/
register_action(SchedulerManagerMessages::PLACE, [this](auto msg) {
this->_place(std::move(msg));});
register_action(SchedulerManagerMessages::OPTIMIZE, [this](auto msg) {
this->_optimize(std::move(msg));});
register_action(SchedulerManagerMessages::LOG,
&SchedulerManager::_log);
register_action(SchedulerManagerMessages::UNDEFINED,
&SchedulerManager::_undefined);
/**
* Start Driver
*/
std::string error;
if ( DriverManager::start(error) != 0 )
{
NebulaLog::error("SCM", error);
return -1;
}
/**
* Start timer
*/
timer_thread.reset(new Timer(timer_period, [this]() {timer_action();}));
/**
* Start Listener
*/
NebulaLog::log("SCM", Log::INFO, "Starting Scheduler Manager...");
Listener::start();
return 0;
}
int SchedulerManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)
{
const VectorAttribute * vattr = nullptr;
NebulaLog::info("SCM", "Loading Scheduler Manager drivers.");
if ( _mads.size() > 0 )
{
vattr = static_cast<const VectorAttribute *>(_mads[0]);
}
if ( vattr == nullptr )
{
NebulaLog::error("SCM", "Failed to load Scheduler Manager driver.");
return -1;
}
VectorAttribute sched_conf("SCHED_MAD", vattr->value());
sched_conf.replace("NAME", driver_name);
if ( load_driver(&sched_conf) != 0 )
{
NebulaLog::error("SCM", "Unable to load Scheduler Manager driver");
return -1;
}
NebulaLog::log("SCM", Log::INFO, "\tScheduler Manager loaded");
return 0;
}
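
The driver itself is configured through a SCHED_MAD vector attribute in oned.conf. A hypothetical sketch (only the SCHED_MAD name comes from the code above; the EXECUTABLE and ARGUMENTS values are illustrative):

    SCHED_MAD = [
        EXECUTABLE = "one_sched",
        ARGUMENTS  = "-t 15"
    ]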
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* MANAGER EVENTS. Triggered by other OpenNebula components to place or */
/* optimize cluster workloads. */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void SchedulerManager::trigger_place()
{
trigger([&]
{
RaftManager * raftm = Nebula::instance().get_raftm();
if (!raftm || (!raftm->is_leader() && !raftm->is_solo()))
{
return;
}
// --------------------------------------------------------------------
// Check and update the scheduler window
// --------------------------------------------------------------------
{
std::lock_guard<std::mutex> lock(wnd_mtx);
time_t the_time = time(nullptr);
if ( wnd_start == 0 )
{
wnd_start = the_time;
}
++wnd_length;
std::ostringstream oss;
oss << "Scheduler window length " << the_time - wnd_start << "s and "
<< wnd_length << " VMs";
NebulaLog::ddebug("SCM", oss.str());
if (the_time < (wnd_start + max_wnd_time) &&
wnd_length < max_wnd_length)
{
return;
}
//TODO Check that a PLACE plan is not being applied
//send place request to driver, reset window
last_place = the_time;
wnd_start = 0;
wnd_length = 0;
}
auto scheduler = get();
if (scheduler == nullptr)
{
return;
}
scheduler->place();
});
}
void SchedulerManager::trigger_optimize(int cluster_id)
{
trigger([this, cluster_id]
{
RaftManager * raftm = Nebula::instance().get_raftm();
if (!raftm || (!raftm->is_leader() && !raftm->is_solo()))
{
return;
}
auto scheduler = get();
if (scheduler == nullptr)
{
return;
}
scheduler->optimize(cluster_id);
});
}
void SchedulerManager::timer_action()
{
static int mark = 0;
static auto vmpool = Nebula::instance().get_vmpool();
std::vector<int> vmids;
mark += timer_period;
if ( mark >= 600 )
{
NebulaLog::log("SCM", Log::INFO, "--Mark--");
mark = 0;
}
RaftManager * raftm = Nebula::instance().get_raftm();
if (!raftm || (!raftm->is_leader() && !raftm->is_solo()))
{
return;
}
// Check the scheduler window & waiting times
{
std::lock_guard<std::mutex> lock(wnd_mtx);
time_t the_time = time(nullptr);
vmpool->get_pending(vmids);
bool expired = ((wnd_start > 0) && (the_time >= (wnd_start + max_wnd_time)))
|| (wnd_length >= max_wnd_length);
bool pending = (vmids.size() > 0) &&
(the_time >= last_place + retry_time);
std::ostringstream oss;
time_t wt = (wnd_start == 0) ? 0 : (the_time - wnd_start);
time_t rt = last_place + retry_time - the_time;
rt = (rt < 0) ? 0 : rt;
oss << "Scheduler window length " << wt << "s and " << wnd_length << " VMs"
<< ". Pending VMs: " << vmids.size() << " time to retry: " << rt;
NebulaLog::ddebug("SCMT", oss.str());
//TODO Check there is no placement plan active
if (!expired && !pending)
{
return;
}
last_place = the_time;
wnd_start = 0;
wnd_length = 0;
}
auto scheduler = get();
if (scheduler == nullptr)
{
return;
}
scheduler->place();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* PROTOCOL ACTIONS. Executed for each type of driver message received from */
/* from the scheduler */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
static void log_msg(scheduler_msg_t *msg)
{
std::ostringstream oss;
oss << "Message received: ";
msg->write_to(oss);
NebulaLog::ddebug("SCM", oss.str());
}
void SchedulerManager::_undefined(std::unique_ptr<scheduler_msg_t> msg)
{
NebulaLog::warn("SCM", "Received UNDEFINED msg: " + msg->payload());
}
void SchedulerManager::_log(std::unique_ptr<scheduler_msg_t> msg)
{
auto vmpool = Nebula::instance().get_vmpool();
if (msg->oid() < 0)
{
NebulaLog::log("SCM", log_type(msg->status()[0]), msg->payload());
}
else if (auto vm = vmpool->get(msg->oid()))
{
auto lt = log_type(msg->status()[0]);
vm->log("VMM", lt, msg->payload());
if ( lt == Log::ERROR )
{
vm->set_template_error_message("SCHED_MESSAGE", msg->payload());
vmpool->update(vm.get());
}
}
}
void SchedulerManager::_place(std::unique_ptr<scheduler_msg_t> msg)
{
log_msg(msg.get());
if (msg->status() == "FAILURE")
{
std::ostringstream oss;
oss << "Scheduler place operation error: " << msg->payload();
NebulaLog::log("SCM", Log::INFO, oss);
return;
}
auto planm = Nebula::instance().get_planm();
planm->add_plan(msg->payload());
}
void SchedulerManager::_optimize(std::unique_ptr<scheduler_msg_t> msg)
{
log_msg(msg.get());
if (msg->status() == "FAILURE")
{
std::ostringstream oss;
oss << "Optimize error for cluster " << msg->oid() << ": "
<< msg->payload();
NebulaLog::log("SCM", Log::INFO, oss);
return;
}
auto planm = Nebula::instance().get_planm();
planm->add_plan(msg->payload());
}


@ -0,0 +1,764 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2024, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "SchedulerManagerDriver.h"
#include "SchedulerManagerMatch.h"
#include "AclManager.h"
#include "VMGroupPool.h"
#include "ClusterPool.h"
#include "Nebula.h"
SchedulerManagerDriver::SchedulerManagerDriver(const std::string& c,
const std::string& a, int ct): Driver(c, a, ct)
{
auto& nebula = Nebula::instance();
vmpool = nebula.get_vmpool();
hpool = nebula.get_hpool();
dspool = nebula.get_dspool();
vnpool = nebula.get_vnpool();
upool = nebula.get_upool();
clpool = nebula.get_clpool();
vmgpool = nebula.get_vmgrouppool();
}
/* -------------------------------------------------------------------------- */
void SchedulerManagerDriver::place() const
{
SchedRequest sr(vmpool, hpool, dspool, vnpool, upool, clpool);
setup_place_pools(sr);
match(sr, "Cannot dispatch VM: ");
std::ostringstream oss;
scheduler_message(sr, oss);
place(oss);
}
void SchedulerManagerDriver::optimize(int cluster_id) const
{
SchedRequest sr(vmpool, hpool, dspool, vnpool, upool, clpool);
setup_optimize_pools(cluster_id, sr);
match(sr, "Optimize: ");
std::ostringstream oss;
scheduler_message(sr, oss);
optimize(cluster_id, oss);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int SchedulerManagerDriver::scheduler_message(SchedRequest& sr, std::ostringstream& oss) const
{
std::string temp;
oss << "<SCHEDULER_DRIVER_ACTION>";
sr.vmpool.to_xml(oss, sr.match.vms);
sr.hpool.to_xml(oss, sr.match.match_host);
//Include Image and System datastores to compute SELF LN/CP methods
dspool->dump(temp, "", 0, -1, false);
oss << temp;
sr.vnpool.to_xml(oss, sr.match.match_net);
if ( sr.match.match_vmgroups.empty() )
{
oss << "<VM_GROUP_POOL/>";
}
else
{
oss << "<VM_GROUP_POOL>";
for (int id: sr.match.match_vmgroups)
{
if (auto grp = vmgpool->get_ro(id))
{
std::string grp_s;
oss << grp->to_xml(grp_s);
}
}
oss << "</VM_GROUP_POOL>";
}
sr.clpool.to_xml(oss);
sr.match.to_xml(oss);
oss << "</SCHEDULER_DRIVER_ACTION>";
return 0;
}
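
Putting the pieces together, each driver request is a single XML document shaped roughly as follows; the pool element names are assumed from the standard OpenNebula pool dumps, and the trailing match section is produced by sr.match.to_xml():

    <SCHEDULER_DRIVER_ACTION>
      <VM_POOL>...</VM_POOL>                 <!-- VMs to place/optimize -->
      <HOST_POOL>...</HOST_POOL>             <!-- matched hosts -->
      <DATASTORE_POOL>...</DATASTORE_POOL>   <!-- image + system datastores -->
      <VNET_POOL>...</VNET_POOL>             <!-- matched virtual networks -->
      <VM_GROUP_POOL/>                       <!-- or the referenced VM groups -->
      <CLUSTER_POOL>...</CLUSTER_POOL>
      <!-- per-VM host/datastore/network candidates from sr.match -->
    </SCHEDULER_DRIVER_ACTION>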
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void SchedulerManagerDriver::log_vm(int id, const std::string& msg) const
{
if (auto vm = vmpool->get(id))
{
vm->set_template_error_message("SCHED_MESSAGE", msg);
vmpool->update(vm.get());
}
}
void SchedulerManagerDriver::log_cluster(int cluster_id, const std::string& msg) const
{
if (auto cluster = clpool->get(cluster_id))
{
cluster->set_error_message("SCM", "SCHED_MESSAGE", msg);
clpool->update(cluster.get());
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int SchedulerManagerDriver::setup_place_pools(SchedRequest &sr) const
{
// -------------------------------------------------------------------------
// Setup VM pool with pending VMs
// -------------------------------------------------------------------------
if ( vmpool->get_pending(sr.vmpool.ids) != 0 || sr.vmpool.ids.empty() )
{
return -1;
}
// -------------------------------------------------------------------------
// Host matching
// -------------------------------------------------------------------------
int rc = hpool->search(sr.hpool.ids, "state = 1 OR state = 2");
if ( rc != 0 )
{
NebulaLog::error("SCM", "Error getting host list.");
return -1;
}
else if ( sr.hpool.ids.empty() )
{
sr.vmpool.each_id([this](int id) {
log_vm(id, "Cannot dispatch VM: No hosts enabled to run VMs");
});
return -1;
}
sr.merge_cluster_to_host();
// -------------------------------------------------------------------------
// Datastore matching (only for system ds)
// -------------------------------------------------------------------------
rc = dspool->list(sr.dspool.ids);
if ( rc != 0 )
{
NebulaLog::error("SCM", "Error getting dastore list.");
return -1;
}
for (auto it = sr.dspool.ids.begin(); it != sr.dspool.ids.end(); )
{
auto ds = dspool->get_ro(*it);
if (!ds ||
(ds->get_type() != Datastore::SYSTEM_DS) ||
(ds->is_shared() && !ds->is_monitored())||
(!ds->is_enabled()))
{
it = sr.dspool.ids.erase(it);
continue;
}
else
{
sr.dspool.set(*it, std::move(ds));
++it;
}
}
if ( sr.dspool.ids.empty() )
{
sr.vmpool.each_id([this](int id) {
log_vm(id, "Cannot dispatch VM: No system datastores found to run VMs");
});
return -1;
}
// -------------------------------------------------------------------------
// Virtual Network matching (only for NETWORK_MODE = auto)
// -------------------------------------------------------------------------
rc = vnpool->list(sr.vnpool.ids);
if ( rc != 0 )
{
NebulaLog::error("SCM", "Error getting virtual network list.");
return -1;
}
return 0;
}
int SchedulerManagerDriver::setup_optimize_pools(int cluster_id, SchedRequest& sr) const
{
// -------------------------------------------------------------------------
// Setup VM pool with pending VMs
// -------------------------------------------------------------------------
if ( vmpool->get_cluster_vms(-1, -1, cluster_id, sr.vmpool.ids) != 0 ||
sr.vmpool.ids.empty() )
{
return -1;
}
// -------------------------------------------------------------------------
// Host matching
// -------------------------------------------------------------------------
std::string filter = "cid = " + std::to_string(cluster_id) + " AND (state = 1 OR state = 2)";
if ( hpool->search(sr.hpool.ids, filter) != 0 )
{
NebulaLog::error("SCM", "Optimize: error getting host list.");
return -1;
}
else if ( sr.hpool.ids.empty() )
{
log_cluster(cluster_id, "Optimize: No hosts enabled in cluster");
return -1;
}
sr.merge_cluster_to_host();
// -------------------------------------------------------------------------
// Datastore matching (only for system ds)
// -------------------------------------------------------------------------
if (dspool->list(sr.dspool.ids) != 0)
{
NebulaLog::error("SCM", "Optimize: error getting dastore list.");
return -1;
}
for (auto it = sr.dspool.ids.begin(); it != sr.dspool.ids.end(); )
{
auto ds = dspool->get_ro(*it);
if (!ds ||
(ds->get_type() != Datastore::SYSTEM_DS) ||
(ds->is_shared() && !ds->is_monitored()) ||
(!ds->is_enabled()) ||
(ds->get_cluster_ids().count(cluster_id) == 0))
{
it = sr.dspool.ids.erase(it);
continue;
}
else
{
sr.dspool.set(*it, std::move(ds));
++it;
}
}
if ( sr.dspool.ids.empty() )
{
log_cluster(cluster_id, "Optimize: No system datastores found in cluster");
return -1;
}
// -------------------------------------------------------------------------
// Virtual Network matching (only for NETWORK_MODE = auto)
// -------------------------------------------------------------------------
if (vnpool->list(sr.vnpool.ids) != 0)
{
NebulaLog::error("SCM", "Optimize: error getting virtual network list.");
return -1;
}
sr.clpool.ids.push_back(cluster_id);
return 0;
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Match-making functions for hosts, system ds and vnets
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
static std::string * build_requirements(std::string& ar, std::string& r)
{
std::string *req = &r;
if (!r.empty())
{
if (!ar.empty())
{
std::ostringstream oss;
oss << ar << " & ( " << r << " )";
r = oss.str();
}
}
else if (!ar.empty())
{
req = &ar;
}
return req;
}
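
A minimal usage sketch of the helper above, with illustrative expressions:

    std::string ar = "!(PUBLIC_CLOUD = YES)";   // e.g. AUTOMATIC_REQUIREMENTS
    std::string r  = "FREE_CPU > 50";           // e.g. SCHED_REQUIREMENTS

    std::string *req = build_requirements(ar, r);

    // *req is now "!(PUBLIC_CLOUD = YES) & ( FREE_CPU > 50 )" and points to r.
    // With an empty r it would point to ar; with both empty, to the empty r.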
// -----------------------------------------------------------------------------
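// Returns 1 if the operation is authorized (requests on behalf of oneadmin,
// uid 0, or the oneadmin group, gid 0, skip the ACL check), 0 if it is
// denied, and 2 if the requesting user no longer exists.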
static int authorize(VirtualMachine * vm,
PoolObjectSQL *obj,
AuthRequest::Operation op,
SchedPool<UserPool, User>& upool)
{
static auto aclm = Nebula::instance().get_aclm();
if (vm->get_uid() == 0 || vm->get_gid() == 0 )
{
return 1;
}
PoolObjectAuth perms;
obj->get_permissions(perms);
User * user = upool.get(vm->get_uid());
if (user == nullptr)
{
return 2;
}
if(!aclm->authorize(vm->get_uid(), user->get_groups(), perms, op))
{
return 0;
}
return 1;
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
static int match_hosts(SchedRequest& sr, VirtualMachine * vm, std::string& error);
static int match_system_ds(SchedRequest& sr, VirtualMachine * vm, std::string& error);
static int match_networks(SchedRequest& sr, VirtualMachine * vm, std::string& error);
void SchedulerManagerDriver::match(SchedRequest& sr, const std::string& ebase) const
{
int rc;
std::string error;
for(int vm_id: sr.vmpool.ids)
{
VirtualMachine * vm = sr.vmpool.get(vm_id);
if ( vm == nullptr )
{
continue;
}
rc = match_hosts(sr, vm, error);
if ( rc == -1 )
{
if (!error.empty())
{
log_vm(vm_id, ebase + error);
}
continue;
}
rc = match_system_ds(sr, vm, error);
if ( rc == -1 )
{
if (!error.empty())
{
log_vm(vm_id, ebase + error);
}
continue;
}
rc = match_networks(sr, vm, error);
if ( rc == -1 )
{
if (!error.empty())
{
log_vm(vm_id, ebase + error);
}
continue;
}
int gid = vm->vmgroup_id();
if ( gid != -1 )
{
sr.match.match_vmgroups.insert(gid);
}
}
// Add all matched VMs to the match.vms set
for (auto vid : sr.vmpool.ids)
{
if (sr.match.is_host_matched(vid) &&
sr.match.is_ds_matched(vid) &&
sr.match.is_net_matched(vid))
{
sr.match.vms.insert(vid);
}
}
// Add all VMs in the VM groups to the match vms set
for (auto gid : sr.match.match_vmgroups)
{
if (auto grp = vmgpool->get_ro(gid))
{
for (const auto r : grp->roles())
{
const auto& vms = r->get_vms();
sr.match.vms.insert(vms.begin(), vms.end());
}
}
}
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
int match_hosts(SchedRequest& sr, VirtualMachine * vm, std::string& error)
{
int n_auth = 0;
int n_match = 0;
error.clear();
// -------------------------------------------------------------------------
// Prepare VM requirements expression for Host matching
// -------------------------------------------------------------------------
std::string areqs, reqs;
vm->get_user_template_attribute("SCHED_REQUIREMENTS", reqs);
vm->get_template_attribute("AUTOMATIC_REQUIREMENTS", areqs);
std::string *requirements = build_requirements(areqs, reqs);
for (int host_id: sr.hpool.ids)
{
Host * host = sr.hpool.get(host_id);
if (host == nullptr)
{
continue;
}
//VM cannot be migrated to its current Host
if (vm->is_resched() &&
(vm->hasHistory() && (vm->get_hid() == host->get_oid())))
{
continue;
}
// ---------------------------------------------------------------------
// Check if user is authorized to deploy on the host
// ---------------------------------------------------------------------
int auth = authorize(vm, host, AuthRequest::MANAGE, sr.upool);
if (auth == 0)
{
continue;
}
else if (auth == 2)
{
return -1; // user does not exist, stop host matching
}
n_auth++;
// ---------------------------------------------------------------------
// Evaluate VM requirements
// ---------------------------------------------------------------------
if (!requirements->empty())
{
char * estr;
bool matched;
if ( host->eval_bool(*requirements, matched, &estr) != 0 )
{
std::ostringstream oss;
oss << "Error in SCHED_REQUIREMENTS: '" << requirements
<< "', error: " << estr;
error = oss.str();
free(estr);
return -1;
}
if (matched == false)
{
continue;
}
}
sr.match.add_host(vm->get_oid(), host_id);
n_match++;
}
if (n_auth == 0)
{
error = "User is not authorized to use any host";
return -1;
}
else if (n_match == 0)
{
error = "No host meets SCHED_REQUIREMENTS";
return -1;
}
return 0;
}
// -----------------------------------------------------------------------------
int match_system_ds(SchedRequest& sr, VirtualMachine * vm, std::string& error)
{
int n_auth = 0;
int n_match = 0;
error.clear();
if (vm->is_resched())
{
if (vm->hasHistory())
{
sr.match.add_ds(vm->get_oid(), vm->get_ds_id());
}
return 0;
}
// -------------------------------------------------------------------------
    // Prepare VM requirements expression for System Datastore matching
// -------------------------------------------------------------------------
std::string areqs, reqs;
vm->get_user_template_attribute("SCHED_DS_REQUIREMENTS", reqs);
vm->get_template_attribute("AUTOMATIC_DS_REQUIREMENTS", areqs);
std::string *requirements = build_requirements(areqs, reqs);
for (int ds_id: sr.dspool.ids)
{
Datastore * ds = sr.dspool.get(ds_id);
if (ds == nullptr)
{
continue;
}
// ---------------------------------------------------------------------
// Check if user is authorized
// ---------------------------------------------------------------------
int auth = authorize(vm, ds, AuthRequest::USE, sr.upool);
if (auth == 0)
{
continue;
}
else if (auth == 2)
{
            return -1; //user does not exist, stop datastore matching
}
n_auth++;
// ---------------------------------------------------------------------
// Evaluate VM requirements
// ---------------------------------------------------------------------
if (!requirements->empty())
{
char * estr;
bool matched;
if ( ds->eval_bool(*requirements, matched, &estr) != 0 )
{
std::ostringstream oss;
oss << "Error in SCHED_DS_REQUIREMENTS: '" << requirements
<< "', error: " << estr;
error = oss.str();
free(estr);
return -1;
}
if (matched == false)
{
continue;
}
}
sr.match.add_ds(vm->get_oid(), ds_id);
n_match++;
}
if (n_auth == 0)
{
error = "User is not authorized to use any system datastore";
return -1;
}
else if (n_match == 0)
{
error = "No system datastore meets SCHED_DS_REQUIREMENTS";
return -1;
}
return 0;
}
// -----------------------------------------------------------------------------
int match_networks(SchedRequest& sr, VirtualMachine * vm, std::string& error)
{
error.clear();
std::set<int> auto_ids;
if ( vm->get_auto_nics(auto_ids) == 0 || vm->is_resched() )
{
return 0;
}
sr.match.init_net(vm->get_oid(), auto_ids);
// -------------------------------------------------------------------------
// Prepare VM requirements expression for VirtualNetwork matching
// -------------------------------------------------------------------------
std::string areqs;
vm->get_template_attribute("AUTOMATIC_NIC_REQUIREMENTS", areqs);
for (auto nic_id : auto_ids)
{
int n_auth = 0;
int n_match = 0;
auto nic = vm->get_nic(nic_id);
if ( nic == nullptr )
{
continue;
}
std::string reqs = nic->vector_value("SCHED_REQUIREMENTS");
std::string *requirements = build_requirements(areqs, reqs);
for (int net_id: sr.vnpool.ids)
{
        VirtualNetwork * net = sr.vnpool.get(net_id);
        if (net == nullptr)
        {
            continue;
        }
// -----------------------------------------------------------------
// Check if user is authorized
// -----------------------------------------------------------------
int auth = authorize(vm, net, AuthRequest::USE, sr.upool);
if (auth == 0)
{
continue;
}
else if (auth == 2)
{
                return -1; //user does not exist, stop network matching
}
n_auth++;
// -----------------------------------------------------------------
// Evaluate VM requirements
// -----------------------------------------------------------------
if (!requirements->empty())
{
char * estr;
bool matched;
if ( net->eval_bool(*requirements, matched, &estr) != 0 )
{
std::ostringstream oss;
oss << "Error in SCHED_NIC_REQUIREMENTS in NIC " << nic_id
<< ": '" << requirements << "', error: " << estr;
error = oss.str();
free(estr);
return -1;
}
if (matched == false)
{
continue;
}
}
sr.match.add_net(vm->get_oid(), nic_id, net_id);
n_match++;
}
if (n_auth == 0)
{
error = "User is not authorized to use any virtual network for NIC "
+ std::to_string(nic_id);
return -1;
}
else if (n_match == 0)
{
error = "No virtual network meets SCHED_NIC_REQUIREMENTS for NIC "
+ std::to_string(nic_id);
return -1;
}
}
return 0;
}
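For reference, the expressions evaluated with eval_bool() above come from VM
template attributes. Illustrative values (not part of this commit):

    SCHED_REQUIREMENTS    = "FREE_CPU > 100 & CLUSTER_ID = 100"
    SCHED_DS_REQUIREMENTS = "NAME = ssd_system"
    NIC = [ NETWORK_MODE = "auto", SCHED_REQUIREMENTS = "VLAN_ID != 13" ]

build_requirements() presumably combines each user expression with its
oned-generated AUTOMATIC_*_REQUIREMENTS counterpart before evaluation.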

src/schedm_mad/one_sched (new executable file, 34 lines)

@ -0,0 +1,34 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
if [ -z "${ONE_LOCATION}" ]; then
MADCOMMON=/usr/lib/one/mads/madcommon.sh
VAR_LOCATION=/var/lib/one
else
MADCOMMON=$ONE_LOCATION/lib/mads/madcommon.sh
VAR_LOCATION=$ONE_LOCATION/var
fi
. $MADCOMMON
# Go to ONE_LOCATION
cd $VAR_LOCATION
# Execute the actual driver
execute_mad $*

src/schedm_mad/one_sched.rb (new executable file, 148 lines)

@ -0,0 +1,148 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
GEMS_LOCATION = '/usr/share/one/gems'
ETC_LOCATION = '/etc/one/'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
ETC_LOCATION = ONE_LOCATION + '/etc/'
end
# %%RUBYGEMS_SETUP_BEGIN%%
if File.directory?(GEMS_LOCATION)
real_gems_path = File.realpath(GEMS_LOCATION)
if !defined?(Gem) || Gem.path != [real_gems_path]
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
# Suppress warnings from Rubygems
# https://github.com/OpenNebula/one/issues/5379
begin
verb = $VERBOSE
$VERBOSE = nil
require 'rubygems'
Gem.use_paths(real_gems_path)
ensure
$VERBOSE = verb
end
end
end
# %%RUBYGEMS_SETUP_END%%
$LOAD_PATH << RUBY_LIB_LOCATION
require 'DriverLogger'
require 'OpenNebulaDriver'
require 'getoptlong'
# require 'shellwords'
# Scheduler Driver. This class executes place and optimize actions of the active
# scheduler
class SchedulerDriver < OpenNebulaDriver
# Scheduler Driver Protocol constants
ACTION = {
:place => 'PLACE',
:optimize => 'OPTIMIZE'
}
# Init the driver
def initialize(scheduler, options = {})
@options={
:concurrency => 15,
:threaded => false,
:retries => 0,
:local_actions => {
ACTION[:place] => nil,
ACTION[:optimize] => nil
}
}.merge!(options)
super('scheduler', @options)
@scheduler = scheduler
path = File.join(@local_scripts_path, @scheduler)
raise "Scheduler #{scheduler} not avialable" unless File.directory?(path)
register_action(ACTION[:place].to_sym, method('place'))
register_action(ACTION[:optimize].to_sym, method('optimize'))
end
    # Executes PLACE action using
# /var/lib/one/remotes/sched/one-drs/place <<STDIN
# <drv_message>
# STDIN
def place(_id, drv_message)
cmd = File.join(@local_scripts_path, @scheduler, 'place')
rc = LocalCommand.run(cmd, log_method(0, :encode => true), drv_message, nil)
result, info = get_info_from_execution(rc, :encode => true)
send_message(ACTION[:place], result, 0, Base64.strict_encode64(info))
end
    # Executes OPTIMIZE action using
# /var/lib/one/remotes/sched/one-drs/optimize <cluster_id> <<STDIN
# <drv_message>
# STDIN
def optimize(id, drv_message)
cmd = "#{File.join(@local_scripts_path, @scheduler, 'optimize')} #{id}"
rc = LocalCommand.run(cmd, log_method(id, true), drv_message, nil)
result, info = get_info_from_execution(rc)
send_message(ACTION[:optimize], result, id, info)
end
end
################################################################################
# Scheduler Driver Main program
################################################################################
opts = GetoptLong.new(
['--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT],
['--scheduler', '-s', GetoptLong::REQUIRED_ARGUMENT]
)
scheduler = 'rank'
threads = 1
begin
opts.each do |opt, arg|
case opt
when '--scheduler'
scheduler = arg
when '--threads'
threads = arg.to_i
end
end
SchedulerDriver.new(scheduler, :concurrency => threads).start_driver
rescue StandardError => e
STDERR.puts "Error starting driver: #{e.message}"
exit(-1)
end
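Usage sketch (flags as parsed above; the installed path is an assumption):

    /usr/lib/one/mads/one_sched.rb --scheduler rank --threads 4

The driver then invokes the place/optimize executables of the selected
scheduler under @local_scripts_path, e.g. /var/lib/one/remotes/sched/rank/.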


@ -0,0 +1,21 @@
#!/bin/sh
# -------------------------------------------------------------------------- #
# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
STDIN=`cat -`
exit 0


@ -0,0 +1,21 @@
#!/bin/sh
# -------------------------------------------------------------------------- #
# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
STDIN=`cat -`
exit 0


@ -0,0 +1,37 @@
#*******************************************************************************
# OpenNebula Configuration file
#*******************************************************************************
#*******************************************************************************
# ILP Optimizer configuration attributes
#-------------------------------------------------------------------------------
# DEFAULT_SCHED: Definition of the default scheduling algorithm
# - policy: Specifies the scheduling policy to be used.
# 0 = Packing. Minimize the number of used hosts.
# 1 = Cpu allocation. Minimize the total CPU usage of unallocated CPUs.
#             2 = Cpu count. Minimize the VM count imbalance across the hosts.
#             3 = Cpu balance. Minimize the CPU load (usage) imbalance across the hosts.
#             4 = Cpu ratio. Minimize the CPU load (ratio) imbalance across the hosts.
# - solver: Specifies the ILP solver to be used. Available options:
# "CBC" -> CBC solver: https://coin-or.github.io/pulp/technical/solvers.html#pulp.apis.PULP_CBC_CMD
# "GLPK" -> GLPK solver: https://coin-or.github.io/pulp/technical/solvers.html#pulp.apis.GLPK_CMD
# "COINMP"-> COINMP_DLL solver: https://coin-or.github.io/pulp/technical/solvers.html#pulp.apis.COINMP_DLL
# - solver_path: Specifies the path to the solver binary.
#
# MEMORY_SYSTEM_DS_SCALE: This factor scales the VM usage of the system DS with
# the memory size. This factor can be used to make the scheduler consider the
# overhead of checkpoint files:
# system_ds_usage = system_ds_usage + memory_system_ds_scale * memory
#
# DIFFERENT_VNETS: When set (YES) the NICs of a VM will be forced to be in
# different Virtual Networks.
#*******************************************************************************
DEFAULT_SCHED:
POLICY: 0
SOLVER: "CBC"
SOLVER_PATH: "/usr/lib/one/python/pulp/solverdir/cbc/linux/64/cbc"
MEMORY_SYSTEM_DS_SCALE: 0
DIFFERENT_VNETS: YES
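A minimal sketch of consuming the DEFAULT_SCHED block above with PyYAML and
PuLP; the real one_drs parser may differ, and the config path is assumed:

    import yaml
    import pulp

    with open("/etc/one/schedulers/one_drs.conf") as f:  # assumed location
        sched = yaml.safe_load(f)["DEFAULT_SCHED"]

    if sched["SOLVER"] == "CBC":
        # PULP_CBC_CMD accepts an explicit binary path, matching SOLVER_PATH
        solver = pulp.PULP_CBC_CMD(path=sched["SOLVER_PATH"], msg=False)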


@ -0,0 +1,5 @@
__all__ = ["OptimizerParser", "OptimizerSerializer",]
from .optimizer_parser import OptimizerParser
from .optimizer_serializer import OptimizerSerializer


@ -0,0 +1,6 @@
"""Virtual Machine Mapper for OpenNebula."""
__all__ = ["ILPOptimizer"]
from .ilp_optimizer import ILPOptimizer

File diff suppressed because it is too large


@ -0,0 +1,38 @@
"""Base Class for OpenNebula Mapper."""
import abc
from collections.abc import Collection, Mapping
from typing import Any, Optional
from .model import Allocation, HostCapacity, VMRequirements
class Mapper(abc.ABC):
__slots__ = ()
@abc.abstractmethod
def __init__(
self,
current_placement: Mapping[int, int],
vm_requirements: Collection[VMRequirements],
host_capacities: Collection[HostCapacity],
criteria: Any,
migrations: Optional[bool] = None,
preemptive: bool = False,
**kwargs
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def map(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def placements(
self, top_k: int = 1
) -> list[dict[int, Optional[Allocation]]]:
raise NotImplementedError()
@abc.abstractmethod
def report(self, path: str = "") -> str:
raise NotImplementedError()
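Illustration only: a naive first-fit mapper satisfying the abstract interface
above (the shipped implementation is ILPOptimizer). Module names are assumed,
and VMs are assumed to carry no NIC constraints:

    from typing import Any, Optional

    from .mapper import Mapper                      # assumed module name
    from .model import Allocation, HostCapacity, VMRequirements

    class FirstFitMapper(Mapper):
        def __init__(self, current_placement, vm_requirements,
                     host_capacities, criteria: Any, migrations=None,
                     preemptive=False, **kwargs) -> None:
            self._vms = list(vm_requirements)
            self._hosts = list(host_capacities)
            self._result: dict[int, Optional[Allocation]] = {}

        def map(self) -> None:
            for vm in self._vms:
                # No VNet capacities passed: assumes nic_matches is empty.
                hosts = vm.find_host_matches(self._hosts, [], free=True).hosts
                self._result[vm.id] = Allocation(
                    vm_id=vm.id, host_id=hosts[0], host_dstore_ids={},
                    shared_dstore_ids={}, nics={}) if hosts else None

        def placements(self, top_k: int = 1):
            return [self._result]

        def report(self, path: str = "") -> str:
            return f"{len(self._result)} VMs mapped"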


@ -0,0 +1,421 @@
from dataclasses import dataclass, field
import enum
from collections.abc import Collection
from typing import Optional, Union
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class Allocation:
vm_id: int
host_id: int
host_dstore_ids: dict[int, int]
shared_dstore_ids: dict[int, int]
nics: dict[int, int]
# TODO: Check if `.max_*` and `.total_*` are properly understood.
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class Capacity:
total: Union[float, int]
usage: Union[float, int]
@property
def free(self) -> Union[float, int]:
return self.total - self.usage
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class PCIDevice:
short_address: str
vendor_id: str
device_id: str
class_id: str
vm_id: int = -1
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class HostCapacity:
id: int
memory: Capacity
cpu: Capacity
# The IDs (keys) and capacities (values) for each disk of a host.
disks: dict[int, Capacity] = field(default_factory=dict)
pci_devices: list[PCIDevice] = field(default_factory=list)
cluster_id: int = 0
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class DStoreCapacity:
id: int
size: Capacity
cluster_ids: list[int] = field(default_factory=list)
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class VNetCapacity:
id: int
n_free_ip_addresses: int
cluster_ids: list[int] = field(default_factory=list)
class VMState(enum.Enum):
PENDING = 'pending'
RESCHED = 'resched'
RUNNING = 'running'
POWEROFF = 'poweroff'
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class PCIDeviceRequirement:
short_address: str = ''
vendor_id: str = ''
device_id: str = ''
class_id: str = ''
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class PCIDeviceMatch:
vm_id: int
requirement: int
host_id: int
short_address: str
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class DStoreRequirement:
id: int
vm_id: int
size: int
# Whether a local disk of the assigned host can be used.
allow_host_dstores: bool = True
# The IDs of the matching host datastores.
# Dict {host ID: list of IDs of the matching disks}. If `None`, all
# host disks are considered matching.
host_dstore_ids: Optional[dict[int, list[int]]] = None
# The IDs of the matching shared datastores.
shared_dstore_ids: list[int] = field(default_factory=list)
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class DStoreMatches:
vm_id: int
# The ID or index of the datastore requirement.
requirement: int
# The IDs of the hosts (keys) with suitable storage (values).
host_dstores: dict[int, list[int]]
# The IDs of the shared datastores with suitable storage.
shared_dstores: list[int]
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class HostMatches:
hosts: list[int]
pci_devices: list[PCIDeviceMatch]
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class VMRequirements:
id: int
state: Optional[VMState]
memory: int
cpu_ratio: float
cpu_usage: float = float('nan')
storage: dict[int, DStoreRequirement] = field(default_factory=dict)
pci_devices: list[PCIDeviceRequirement] = field(default_factory=list)
host_ids: Optional[set[int]] = None
# Whether multiple NICs can be associated to the same VNet.
share_vnets: bool = False
# Dict {NIC ID: list of IDs of the VNets that match the NIC}.
nic_matches: dict[int, list[int]] = field(default_factory=dict)
def find_host_matches(
self,
host_capacities: Collection[HostCapacity],
vnet_capacities: Collection[VNetCapacity],
free: bool = True
) -> HostMatches:
return _find_host_matches(self, host_capacities, vnet_capacities, free)
def find_storage_matches(
self,
host_capacities: Collection[HostCapacity],
dstore_capacities: Collection[DStoreCapacity],
free: bool = True
) -> list[DStoreMatches]:
vm_id = self.id
var_name = 'free' if free else 'total'
matches: list[DStoreMatches] = []
for req_id, req in self.storage.items():
host_dstore_ids: dict[int, list[int]] = {}
shared_dstore_ids: list[int] = []
if req.allow_host_dstores:
# Host disks used as system datastores.
if req.host_dstore_ids is None:
# No constraints. All host disks are considered.
for host_cap in host_capacities:
disk_matches: list[int] = []
for disk_id, disk_cap in host_cap.disks.items():
if getattr(disk_cap, var_name) >= req.size:
disk_matches.append(disk_id)
if disk_matches:
host_dstore_ids[host_cap.id] = disk_matches
else:
# Only specified hosts and their disks are
# considered.
host_caps = {cap.id: cap for cap in host_capacities}
for host_id, disk_ids in req.host_dstore_ids.items():
disk_caps = host_caps[host_id].disks
disk_matches: list[int] = []
for disk_id in disk_ids:
disk_cap = disk_caps[disk_id]
if getattr(disk_cap, var_name) >= req.size:
disk_matches.append(disk_id)
if disk_matches:
host_dstore_ids[host_id] = disk_matches
else:
# Shared datastores.
dstore_caps = {cap.id: cap for cap in dstore_capacities}
for dstore_id in req.shared_dstore_ids:
dstore_cap = dstore_caps[dstore_id]
# NOTE: This check is probably redundant.
if getattr(dstore_cap.size, var_name) >= req.size:
shared_dstore_ids.append(dstore_id)
# Matches.
match_ = DStoreMatches(
vm_id=vm_id,
requirement=req_id,
host_dstores=host_dstore_ids,
shared_dstores=shared_dstore_ids
)
matches.append(match_)
return matches
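Quick check of the host-datastore branch above (names from this module); a
10 GB system-DS requirement matched against one host-local disk:

    req = DStoreRequirement(id=0, vm_id=1, size=10240)
    vm = VMRequirements(id=1, state=VMState.PENDING, memory=1024,
                        cpu_ratio=50, storage={0: req})
    host = HostCapacity(id=7, memory=Capacity(65536, 0),
                        cpu=Capacity(1600, 0),
                        disks={0: Capacity(total=102400, usage=2048)})

    matches = vm.find_storage_matches([host], [], free=True)
    assert matches[0].host_dstores == {7: [0]}   # disk 0 of host 7 fits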
# @dataclass(frozen=True, slots=True)
@dataclass(frozen=True)
class VMGroup:
id: int
affined: bool
vm_ids: set[int]
def find_host_matches(
self,
vm_requirements: Collection[VMRequirements],
host_capacities: Collection[HostCapacity],
vnet_capacities: Collection[VNetCapacity],
free: bool = True
) -> dict[int, HostMatches]:
if not self.affined:
return {
vm_req.id: vm_req.find_host_matches(
host_capacities, vnet_capacities, free
)
for vm_req in vm_requirements
if vm_req.id in self.vm_ids
}
# TODO: Test filtering according to NIC requirements.
vm_ids = self.vm_ids
memory = 0
cpu_ratio = 0.0
pci_devices: list[PCIDeviceRequirement] = []
pci_req_idxs: list[tuple[int, int]] = []
all_host_ids: list[set[int]] = []
# NOTE: NIC requirements are used here only for filtering, not
# for matching, so NIC IDs (the keys) are not relevant.
all_nic_matches: list[list[int]] = []
for vm_req in vm_requirements:
vm_id = vm_req.id
if vm_id not in vm_ids:
continue
memory += vm_req.memory
cpu_ratio += vm_req.cpu_ratio
pci_devices += vm_req.pci_devices
for i in range(len(vm_req.pci_devices)):
pci_req_idxs.append((vm_id, i))
if vm_req.host_ids is not None:
all_host_ids.append(vm_req.host_ids)
all_nic_matches += vm_req.nic_matches.values()
group_req = VMRequirements(
id=-1,
state=None,
memory=memory,
cpu_ratio=cpu_ratio,
pci_devices=pci_devices,
host_ids=set.intersection(*all_host_ids) if all_host_ids else None,
# TODO: Check this logic.
share_vnets=all(vm_req.share_vnets for vm_req in vm_requirements),
nic_matches=dict(enumerate(all_nic_matches))
)
matches = group_req.find_host_matches(
host_capacities, vnet_capacities, free
)
match_host_ids = matches.hosts
result = {vm_id: HostMatches(match_host_ids, []) for vm_id in vm_ids}
for match_ in matches.pci_devices:
vm_id, req_idx = pci_req_idxs[match_.requirement]
host_id = match_.host_id
short_address = match_.short_address
corr_match = PCIDeviceMatch(vm_id, req_idx, host_id, short_address)
result[vm_id].pci_devices.append(corr_match)
return result
def _match_pci_device(
requirement: PCIDeviceRequirement, pci_device: PCIDevice, free: bool
) -> bool:
if free and pci_device.vm_id >= 0:
return False
return (
requirement.vendor_id in {pci_device.vendor_id, ''}
and requirement.device_id in {pci_device.device_id, ''}
and requirement.class_id in {pci_device.class_id, ''}
)
# if requirement.vendor_id and requirement.vendor_id != capacity.vendor_id:
# return False
# if requirement.device_id and requirement.device_id != capacity.device_id:
# return False
# if requirement.class_id and requirement.class_id != capacity.class_id:
# return False
# return True
def _match_pci_devices(
vm_requirements: VMRequirements, host_capacity: HostCapacity, free: bool
) -> Optional[list[PCIDeviceMatch]]:
vm_id = vm_requirements.id
host_id = host_capacity.id
if free:
pcids = {
pcid.short_address: pcid
for pcid in host_capacity.pci_devices
if pcid.vm_id == -1
}
else:
pcids = {
pcid.short_address: pcid for pcid in host_capacity.pci_devices
}
all_matches: list[PCIDeviceMatch] = []
for req_idx, pcid_req in enumerate(vm_requirements.pci_devices):
if address := pcid_req.short_address:
# Checking for the required short address.
if address in pcids:
# The host can satisfy the requirement `req`.
pcid_match = PCIDeviceMatch(
vm_id=vm_id,
requirement=req_idx,
host_id=host_id,
short_address=address
)
all_matches.append(pcid_match)
else:
# The host cannot satisfy the requirement `req`. Since
# one of the VM requirements for PCI devices is not met,
# the method returns `None`.
return None
else:
# Checking for the required vendor, device, or class.
matches: list[PCIDeviceMatch] = []
for address, pcid in pcids.items():
if _match_pci_device(pcid_req, pcid, free):
pcid_match = PCIDeviceMatch(
vm_id=vm_id,
requirement=req_idx,
host_id=host_id,
short_address=address
)
matches.append(pcid_match)
if matches:
# The host has at least one PCI device that can
# satisfy the requirement `req`.
all_matches += matches
else:
# The host has no PCI devices that can satisfy the
# requirement `req`. Since one of the VM
# requirements for PCI devices is not met, the
# method returns `None`.
return None
return all_matches
def _find_host_matches(
vm_requirements: VMRequirements,
host_capacities: Collection[HostCapacity],
vnet_capacities: Collection[VNetCapacity],
free: bool
) -> HostMatches:
# TODO: Consider if filtering by host ID and capacity is necessary.
host_caps = {host_cap.id: host_cap for host_cap in host_capacities}
if vm_requirements.state is VMState.PENDING:
# # TODO: Consider additional filtering when shared VNets are not
# # allowed.
# if not vm_requirements.share_vnets:
# all_nic_matches: set[int] = set()
# for vnet_ids in vm_requirements.nic_matches.values():
# all_nic_matches |= set(vnet_ids)
# if len(all_nic_matches) > len(vnet_capacities):
# return HostMatches(hosts=[], pci_devices=[])
# Filter according to NICs. A VM can be allocated to a host only
# if the cluster of that host can support all NIC requirements.
# TODO: Test filtering according to NIC requirements.
# TODO: Consider using a similar approach for disks when there
# is no appropriate shared storage.
vnet_caps = {vnet_cap.id: vnet_cap for vnet_cap in vnet_capacities}
cluster_ids: list[set[int]] = []
for vnet_ids in vm_requirements.nic_matches.values():
nic_cluster_ids: set[int] = set()
for vnet_id in vnet_ids:
nic_cluster_ids |= set(vnet_caps[vnet_id].cluster_ids)
cluster_ids.append(nic_cluster_ids)
if cluster_ids and (common_ids := set.intersection(*cluster_ids)):
for id_, host_cap in tuple(host_caps.items()):
if host_cap.cluster_id not in common_ids:
del host_caps[id_]
# Filter according to host IDs.
if (host_id_reqs := vm_requirements.host_ids) is not None:
for id_ in tuple(host_caps):
if id_ not in host_id_reqs:
del host_caps[id_]
# Filter according to CPU and memory.
var_name = 'free' if free else 'total'
cpu_ratio = vm_requirements.cpu_ratio
memory = vm_requirements.memory
for id_, host_cap in tuple(host_caps.items()):
if (
getattr(host_cap.cpu, var_name) < cpu_ratio
or getattr(host_cap.memory, var_name) < memory
):
del host_caps[id_]
# Filter according to PCI devices.
pcid_matches: list[PCIDeviceMatch] = []
if vm_requirements.pci_devices:
for id_, host_cap in tuple(host_caps.items()):
matches = _match_pci_devices(vm_requirements, host_cap, free)
if matches is None:
del host_caps[id_]
else:
pcid_matches += matches
# Result.
return HostMatches(hosts=list(host_caps), pci_devices=pcid_matches)
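Illustration only: the capacity filter above in action, with names from this
module:

    host = HostCapacity(id=7,
                        memory=Capacity(total=16384, usage=4096),
                        cpu=Capacity(total=800, usage=200))
    vm = VMRequirements(id=1, state=VMState.PENDING,
                        memory=8192, cpu_ratio=100)

    matches = vm.find_host_matches([host], [], free=True)
    assert matches.hosts == [7]   # 600 free CPU >= 100; 12288 MB free >= 8192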


@ -0,0 +1,49 @@
from lib.models.plan import Plan
from lib.models.cluster import Cluster
from lib.models.datastore import Datastore
from lib.models.datastore_pool import DatastorePool
from lib.models.host import Host
from lib.models.host_pool import HostPool
from lib.models.requirements import Requirements
from lib.models.scheduler_driver_action import SchedulerDriverAction
from lib.models.shared import (
DatastoreQuota,
Ids,
ImageQuota,
Lock,
NetworkQuota,
Permissions,
SchedAction,
VmQuota,
)
from lib.models.vm import Vm
from lib.models.vm_group import VmGroup
from lib.models.vm_group_pool import VmGroupPool
from lib.models.vm_pool_extended import VmPool
from lib.models.vnet import Vnet
from lib.models.vnet_pool_extended import VnetPool
__all__ = [
"Plan",
"Cluster",
"Datastore",
"DatastorePool",
"Host",
"HostPool",
"Requirements",
"SchedulerDriverAction",
"DatastoreQuota",
"Ids",
"ImageQuota",
"Lock",
"NetworkQuota",
"Permissions",
"SchedAction",
"VmQuota",
"Vm",
"VmGroup",
"VmGroupPool",
"VmPool",
"Vnet",
"VnetPool",
]


@ -0,0 +1,86 @@
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class Cluster:
class Meta:
name = "CLUSTER"
id: Optional[int] = field(
default=None,
metadata={
"name": "ID",
"type": "Element",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"name": "NAME",
"type": "Element",
"required": True,
},
)
hosts: Optional["Cluster.Hosts"] = field(
default=None,
metadata={
"name": "HOSTS",
"type": "Element",
"required": True,
},
)
datastores: Optional["Cluster.Datastores"] = field(
default=None,
metadata={
"name": "DATASTORES",
"type": "Element",
"required": True,
},
)
vnets: Optional["Cluster.Vnets"] = field(
default=None,
metadata={
"name": "VNETS",
"type": "Element",
"required": True,
},
)
template: Optional[object] = field(
default=None,
metadata={
"name": "TEMPLATE",
"type": "Element",
},
)
@dataclass
class Hosts:
id: list[int] = field(
default_factory=list,
metadata={
"name": "ID",
"type": "Element",
},
)
@dataclass
class Datastores:
id: list[int] = field(
default_factory=list,
metadata={
"name": "ID",
"type": "Element",
},
)
@dataclass
class Vnets:
id: list[int] = field(
default_factory=list,
metadata={
"name": "ID",
"type": "Element",
},
)
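These dataclasses follow xsdata conventions (Meta.name plus Element metadata),
so, assuming xsdata is the binding library in use, deserializing oned's XML is
straightforward:

    from xsdata.formats.dataclass.parsers import XmlParser

    from lib.models.cluster import Cluster

    xml = """<CLUSTER>
      <ID>100</ID><NAME>default</NAME>
      <HOSTS><ID>3</ID><ID>4</ID></HOSTS>
      <DATASTORES><ID>0</ID></DATASTORES>
      <VNETS/>
    </CLUSTER>"""

    cluster = XmlParser().from_string(xml, Cluster)
    assert cluster.hosts.id == [3, 4]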


@ -0,0 +1,306 @@
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class Datastore:
class Meta:
name = "DATASTORE"
id: Optional[int] = field(
default=None,
metadata={
"name": "ID",
"type": "Element",
"required": True,
},
)
uid: Optional[int] = field(
default=None,
metadata={
"name": "UID",
"type": "Element",
"required": True,
},
)
gid: Optional[int] = field(
default=None,
metadata={
"name": "GID",
"type": "Element",
"required": True,
},
)
uname: Optional[str] = field(
default=None,
metadata={
"name": "UNAME",
"type": "Element",
"required": True,
},
)
gname: Optional[str] = field(
default=None,
metadata={
"name": "GNAME",
"type": "Element",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"name": "NAME",
"type": "Element",
"required": True,
},
)
permissions: Optional["Datastore.Permissions"] = field(
default=None,
metadata={
"name": "PERMISSIONS",
"type": "Element",
},
)
ds_mad: Optional[str] = field(
default=None,
metadata={
"name": "DS_MAD",
"type": "Element",
"required": True,
},
)
tm_mad: Optional[str] = field(
default=None,
metadata={
"name": "TM_MAD",
"type": "Element",
"required": True,
},
)
base_path: Optional[str] = field(
default=None,
metadata={
"name": "BASE_PATH",
"type": "Element",
"required": True,
},
)
type_value: Optional[int] = field(
default=None,
metadata={
"name": "TYPE",
"type": "Element",
"required": True,
},
)
disk_type: Optional[int] = field(
default=None,
metadata={
"name": "DISK_TYPE",
"type": "Element",
"required": True,
},
)
state: Optional[int] = field(
default=None,
metadata={
"name": "STATE",
"type": "Element",
"required": True,
},
)
clusters: Optional["Datastore.Clusters"] = field(
default=None,
metadata={
"name": "CLUSTERS",
"type": "Element",
"required": True,
},
)
total_mb: Optional[int] = field(
default=None,
metadata={
"name": "TOTAL_MB",
"type": "Element",
"required": True,
},
)
free_mb: Optional[int] = field(
default=None,
metadata={
"name": "FREE_MB",
"type": "Element",
"required": True,
},
)
used_mb: Optional[int] = field(
default=None,
metadata={
"name": "USED_MB",
"type": "Element",
"required": True,
},
)
images: Optional["Datastore.Images"] = field(
default=None,
metadata={
"name": "IMAGES",
"type": "Element",
"required": True,
},
)
template: Optional["Datastore.Template"] = field(
default=None,
metadata={
"name": "TEMPLATE",
"type": "Element",
"required": True,
},
)
@dataclass
class Permissions:
owner_u: Optional[int] = field(
default=None,
metadata={
"name": "OWNER_U",
"type": "Element",
"required": True,
},
)
owner_m: Optional[int] = field(
default=None,
metadata={
"name": "OWNER_M",
"type": "Element",
"required": True,
},
)
owner_a: Optional[int] = field(
default=None,
metadata={
"name": "OWNER_A",
"type": "Element",
"required": True,
},
)
group_u: Optional[int] = field(
default=None,
metadata={
"name": "GROUP_U",
"type": "Element",
"required": True,
},
)
group_m: Optional[int] = field(
default=None,
metadata={
"name": "GROUP_M",
"type": "Element",
"required": True,
},
)
group_a: Optional[int] = field(
default=None,
metadata={
"name": "GROUP_A",
"type": "Element",
"required": True,
},
)
other_u: Optional[int] = field(
default=None,
metadata={
"name": "OTHER_U",
"type": "Element",
"required": True,
},
)
other_m: Optional[int] = field(
default=None,
metadata={
"name": "OTHER_M",
"type": "Element",
"required": True,
},
)
other_a: Optional[int] = field(
default=None,
metadata={
"name": "OTHER_A",
"type": "Element",
"required": True,
},
)
@dataclass
class Clusters:
id: list[int] = field(
default_factory=list,
metadata={
"name": "ID",
"type": "Element",
},
)
@dataclass
class Images:
id: list[int] = field(
default_factory=list,
metadata={
"name": "ID",
"type": "Element",
},
)
@dataclass
class Template:
vcenter_dc_name: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_DC_NAME",
"type": "Element",
},
)
vcenter_dc_ref: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_DC_REF",
"type": "Element",
},
)
vcenter_ds_name: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_DS_NAME",
"type": "Element",
},
)
vcenter_ds_ref: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_DS_REF",
"type": "Element",
},
)
vcenter_host: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_HOST",
"type": "Element",
},
)
vcenter_instance_id: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_INSTANCE_ID",
"type": "Element",
},
)
any_element: list[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
},
)


@ -0,0 +1,17 @@
from dataclasses import dataclass, field
from lib.models.datastore import Datastore
@dataclass
class DatastorePool:
class Meta:
name = "DATASTORE_POOL"
datastore: list[Datastore] = field(
default_factory=list,
metadata={
"name": "DATASTORE",
"type": "Element",
},
)


@ -0,0 +1,814 @@
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class Host:
class Meta:
name = "HOST"
id: Optional[int] = field(
default=None,
metadata={
"name": "ID",
"type": "Element",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"name": "NAME",
"type": "Element",
"required": True,
},
)
state: Optional[int] = field(
default=None,
metadata={
"name": "STATE",
"type": "Element",
"required": True,
},
)
prev_state: Optional[int] = field(
default=None,
metadata={
"name": "PREV_STATE",
"type": "Element",
"required": True,
},
)
im_mad: Optional[str] = field(
default=None,
metadata={
"name": "IM_MAD",
"type": "Element",
"required": True,
},
)
vm_mad: Optional[str] = field(
default=None,
metadata={
"name": "VM_MAD",
"type": "Element",
"required": True,
},
)
cluster_id: Optional[int] = field(
default=None,
metadata={
"name": "CLUSTER_ID",
"type": "Element",
"required": True,
},
)
cluster: Optional[str] = field(
default=None,
metadata={
"name": "CLUSTER",
"type": "Element",
"required": True,
},
)
host_share: Optional["Host.HostShare"] = field(
default=None,
metadata={
"name": "HOST_SHARE",
"type": "Element",
"required": True,
},
)
vms: Optional["Host.Vms"] = field(
default=None,
metadata={
"name": "VMS",
"type": "Element",
"required": True,
},
)
template: Optional["Host.Template"] = field(
default=None,
metadata={
"name": "TEMPLATE",
"type": "Element",
"required": True,
},
)
monitoring: Optional["Host.Monitoring"] = field(
default=None,
metadata={
"name": "MONITORING",
"type": "Element",
"required": True,
},
)
@dataclass
class HostShare:
mem_usage: Optional[int] = field(
default=None,
metadata={
"name": "MEM_USAGE",
"type": "Element",
"required": True,
},
)
cpu_usage: Optional[int] = field(
default=None,
metadata={
"name": "CPU_USAGE",
"type": "Element",
"required": True,
},
)
total_mem: Optional[int] = field(
default=None,
metadata={
"name": "TOTAL_MEM",
"type": "Element",
"required": True,
},
)
total_cpu: Optional[int] = field(
default=None,
metadata={
"name": "TOTAL_CPU",
"type": "Element",
"required": True,
},
)
max_mem: Optional[int] = field(
default=None,
metadata={
"name": "MAX_MEM",
"type": "Element",
"required": True,
},
)
max_cpu: Optional[int] = field(
default=None,
metadata={
"name": "MAX_CPU",
"type": "Element",
"required": True,
},
)
running_vms: Optional[int] = field(
default=None,
metadata={
"name": "RUNNING_VMS",
"type": "Element",
"required": True,
},
)
vms_thread: Optional[int] = field(
default=None,
metadata={
"name": "VMS_THREAD",
"type": "Element",
"required": True,
},
)
datastores: Optional["Host.HostShare.Datastores"] = field(
default=None,
metadata={
"name": "DATASTORES",
"type": "Element",
"required": True,
},
)
pci_devices: Optional["Host.HostShare.PciDevices"] = field(
default=None,
metadata={
"name": "PCI_DEVICES",
"type": "Element",
"required": True,
},
)
numa_nodes: Optional["Host.HostShare.NumaNodes"] = field(
default=None,
metadata={
"name": "NUMA_NODES",
"type": "Element",
"required": True,
},
)
@dataclass
class Datastores:
disk_usage: Optional[int] = field(
default=None,
metadata={
"name": "DISK_USAGE",
"type": "Element",
"required": True,
},
)
ds: list["Host.HostShare.Datastores.Ds"] = field(
default_factory=list,
metadata={
"name": "DS",
"type": "Element",
},
)
free_disk: Optional[int] = field(
default=None,
metadata={
"name": "FREE_DISK",
"type": "Element",
"required": True,
},
)
max_disk: Optional[int] = field(
default=None,
metadata={
"name": "MAX_DISK",
"type": "Element",
"required": True,
},
)
used_disk: Optional[int] = field(
default=None,
metadata={
"name": "USED_DISK",
"type": "Element",
"required": True,
},
)
@dataclass
class Ds:
free_mb: Optional[int] = field(
default=None,
metadata={
"name": "FREE_MB",
"type": "Element",
"required": True,
},
)
id: Optional[int] = field(
default=None,
metadata={
"name": "ID",
"type": "Element",
"required": True,
},
)
total_mb: Optional[int] = field(
default=None,
metadata={
"name": "TOTAL_MB",
"type": "Element",
"required": True,
},
)
used_mb: Optional[int] = field(
default=None,
metadata={
"name": "USED_MB",
"type": "Element",
"required": True,
},
)
replica_cache: Optional[str] = field(
default=None,
metadata={
"name": "REPLICA_CACHE",
"type": "Element",
},
)
replica_cache_size: Optional[str] = field(
default=None,
metadata={
"name": "REPLICA_CACHE_SIZE",
"type": "Element",
},
)
replica_images: Optional[int] = field(
default=None,
metadata={
"name": "REPLICA_IMAGES",
"type": "Element",
},
)
@dataclass
class PciDevices:
pci: list["Host.HostShare.PciDevices.Pci"] = field(
default_factory=list,
metadata={
"name": "PCI",
"type": "Element",
},
)
@dataclass
class Pci:
address: Optional[str] = field(
default=None,
metadata={
"name": "ADDRESS",
"type": "Element",
"required": True,
},
)
bus: Optional[str] = field(
default=None,
metadata={
"name": "BUS",
"type": "Element",
"required": True,
},
)
class_value: Optional[str] = field(
default=None,
metadata={
"name": "CLASS",
"type": "Element",
"required": True,
},
)
class_name: Optional[str] = field(
default=None,
metadata={
"name": "CLASS_NAME",
"type": "Element",
"required": True,
},
)
device: Optional[str] = field(
default=None,
metadata={
"name": "DEVICE",
"type": "Element",
"required": True,
},
)
device_name: Optional[str] = field(
default=None,
metadata={
"name": "DEVICE_NAME",
"type": "Element",
"required": True,
},
)
domain: Optional[str] = field(
default=None,
metadata={
"name": "DOMAIN",
"type": "Element",
"required": True,
},
)
function: Optional[str] = field(
default=None,
metadata={
"name": "FUNCTION",
"type": "Element",
"required": True,
},
)
numa_node: Optional[str] = field(
default=None,
metadata={
"name": "NUMA_NODE",
"type": "Element",
"required": True,
},
)
profiles: Optional[str] = field(
default=None,
metadata={
"name": "PROFILES",
"type": "Element",
},
)
short_address: Optional[str] = field(
default=None,
metadata={
"name": "SHORT_ADDRESS",
"type": "Element",
"required": True,
},
)
slot: Optional[str] = field(
default=None,
metadata={
"name": "SLOT",
"type": "Element",
"required": True,
},
)
type_value: Optional[str] = field(
default=None,
metadata={
"name": "TYPE",
"type": "Element",
"required": True,
},
)
uuid: Optional[str] = field(
default=None,
metadata={
"name": "UUID",
"type": "Element",
},
)
vendor: Optional[str] = field(
default=None,
metadata={
"name": "VENDOR",
"type": "Element",
"required": True,
},
)
vendor_name: Optional[str] = field(
default=None,
metadata={
"name": "VENDOR_NAME",
"type": "Element",
"required": True,
},
)
vmid: Optional[int] = field(
default=None,
metadata={
"name": "VMID",
"type": "Element",
"required": True,
},
)
@dataclass
class NumaNodes:
node: list["Host.HostShare.NumaNodes.Node"] = field(
default_factory=list,
metadata={
"name": "NODE",
"type": "Element",
},
)
@dataclass
class Node:
core: list["Host.HostShare.NumaNodes.Node.Core"] = field(
default_factory=list,
metadata={
"name": "CORE",
"type": "Element",
},
)
hugepage: list["Host.HostShare.NumaNodes.Node.Hugepage"] = (
field(
default_factory=list,
metadata={
"name": "HUGEPAGE",
"type": "Element",
},
)
)
memory: Optional["Host.HostShare.NumaNodes.Node.Memory"] = (
field(
default=None,
metadata={
"name": "MEMORY",
"type": "Element",
"required": True,
},
)
)
node_id: Optional[int] = field(
default=None,
metadata={
"name": "NODE_ID",
"type": "Element",
"required": True,
},
)
@dataclass
class Core:
cpus: Optional[str] = field(
default=None,
metadata={
"name": "CPUS",
"type": "Element",
"required": True,
},
)
dedicated: Optional[str] = field(
default=None,
metadata={
"name": "DEDICATED",
"type": "Element",
"required": True,
},
)
free: Optional[int] = field(
default=None,
metadata={
"name": "FREE",
"type": "Element",
"required": True,
},
)
id: Optional[int] = field(
default=None,
metadata={
"name": "ID",
"type": "Element",
"required": True,
},
)
@dataclass
class Hugepage:
pages: Optional[int] = field(
default=None,
metadata={
"name": "PAGES",
"type": "Element",
"required": True,
},
)
size: Optional[int] = field(
default=None,
metadata={
"name": "SIZE",
"type": "Element",
"required": True,
},
)
usage: Optional[int] = field(
default=None,
metadata={
"name": "USAGE",
"type": "Element",
"required": True,
},
)
@dataclass
class Memory:
distance: Optional[str] = field(
default=None,
metadata={
"name": "DISTANCE",
"type": "Element",
"required": True,
},
)
total: Optional[int] = field(
default=None,
metadata={
"name": "TOTAL",
"type": "Element",
"required": True,
},
)
usage: Optional[int] = field(
default=None,
metadata={
"name": "USAGE",
"type": "Element",
"required": True,
},
)
@dataclass
class Vms:
id: list[int] = field(
default_factory=list,
metadata={
"name": "ID",
"type": "Element",
},
)
@dataclass
class Template:
any_element: list[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"process_contents": "skip",
},
)
vcenter_ccr_ref: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_CCR_REF",
"type": "Element",
},
)
vcenter_ds_ref: list[str] = field(
default_factory=list,
metadata={
"name": "VCENTER_DS_REF",
"type": "Element",
},
)
vcenter_host: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_HOST",
"type": "Element",
},
)
vcenter_instance_id: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_INSTANCE_ID",
"type": "Element",
},
)
vcenter_name: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_NAME",
"type": "Element",
},
)
vcenter_password: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_PASSWORD",
"type": "Element",
},
)
vcenter_resource_pool_info: list[object] = field(
default_factory=list,
metadata={
"name": "VCENTER_RESOURCE_POOL_INFO",
"type": "Element",
},
)
vcenter_user: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_USER",
"type": "Element",
},
)
vcenter_version: Optional[str] = field(
default=None,
metadata={
"name": "VCENTER_VERSION",
"type": "Element",
},
)
@dataclass
class Monitoring:
timestamp: Optional[int] = field(
default=None,
metadata={
"name": "TIMESTAMP",
"type": "Element",
},
)
id: Optional[int] = field(
default=None,
metadata={
"name": "ID",
"type": "Element",
},
)
capacity: Optional["Host.Monitoring.Capacity"] = field(
default=None,
metadata={
"name": "CAPACITY",
"type": "Element",
},
)
system: Optional["Host.Monitoring.System"] = field(
default=None,
metadata={
"name": "SYSTEM",
"type": "Element",
},
)
numa_node: list["Host.Monitoring.NumaNode"] = field(
default_factory=list,
metadata={
"name": "NUMA_NODE",
"type": "Element",
},
)
@dataclass
class Capacity:
free_cpu: Optional[int] = field(
default=None,
metadata={
"name": "FREE_CPU",
"type": "Element",
"required": True,
},
)
free_memory: Optional[int] = field(
default=None,
metadata={
"name": "FREE_MEMORY",
"type": "Element",
"required": True,
},
)
used_cpu: Optional[int] = field(
default=None,
metadata={
"name": "USED_CPU",
"type": "Element",
"required": True,
},
)
used_memory: Optional[int] = field(
default=None,
metadata={
"name": "USED_MEMORY",
"type": "Element",
"required": True,
},
)
@dataclass
class System:
netrx: Optional[int] = field(
default=None,
metadata={
"name": "NETRX",
"type": "Element",
},
)
nettx: Optional[int] = field(
default=None,
metadata={
"name": "NETTX",
"type": "Element",
},
)
@dataclass
class NumaNode:
hugepage: list["Host.Monitoring.NumaNode.Hugepage"] = field(
default_factory=list,
metadata={
"name": "HUGEPAGE",
"type": "Element",
},
)
memory: Optional["Host.Monitoring.NumaNode.Memory"] = field(
default=None,
metadata={
"name": "MEMORY",
"type": "Element",
"required": True,
},
)
node_id: Optional[int] = field(
default=None,
metadata={
"name": "NODE_ID",
"type": "Element",
"required": True,
},
)
@dataclass
class Hugepage:
free: Optional[int] = field(
default=None,
metadata={
"name": "FREE",
"type": "Element",
"required": True,
},
)
size: Optional[int] = field(
default=None,
metadata={
"name": "SIZE",
"type": "Element",
"required": True,
},
)
@dataclass
class Memory:
free: Optional[str] = field(
default=None,
metadata={
"name": "FREE",
"type": "Element",
"required": True,
},
)
used: Optional[int] = field(
default=None,
metadata={
"name": "USED",
"type": "Element",
"required": True,
},
)


@ -0,0 +1,17 @@
from dataclasses import dataclass, field
from lib.models.host import Host
@dataclass
class HostPool:
class Meta:
name = "HOST_POOL"
host: list[Host] = field(
default_factory=list,
metadata={
"name": "HOST",
"type": "Element",
},
)

Some files were not shown because too many files have changed in this diff