Mirror of https://github.com/OpenNebula/one.git (synced 2025-01-18 06:03:39 +03:00)
F #4936: Refactor ActionManager and Timers
Co-authored-by: Pavel Czerny <pczerny@opennebula.systems>
parent f28db525c9
commit 325db91bcb
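The commit replaces the pthread-based ActionManager/ActionListener machinery in the manager headers with the Listener base class and a Timer helper: per-manager ActionRequest subclasses and extern "C" action-loop entry points disappear, pthread_mutex_t members become std::mutex, pthread_t thread ids become std::thread/Timer members, and enum-dispatched trigger() calls become explicit trigger_*() methods. The hunks below touch the AclManager, AuthManager, AuthRequest, DispatchManager, FedReplicaManager, HookLog, HookManager, IPAMManager, ImageManager, InformationManager and LifeCycleManager headers, and delete ActionManager.h outright.

The Timer class itself lives in Listener.h, which is not part of this diff. As orientation only, here is a minimal sketch of what such a periodic-callback helper could look like, assuming std::thread plus std::condition_variable; the class name, members and details are illustrative, not the actual OpenNebula implementation:

#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>

// Runs `callback` every `period` seconds on its own thread until stop()
// is called or the object is destroyed.
class PeriodicTimer
{
public:
    PeriodicTimer(double period, std::function<void()> callback)
        : _period(period), _callback(std::move(callback))
        , _thread([this]{ loop(); })
    {
    }

    ~PeriodicTimer() { stop(); }

    void stop()
    {
        {
            std::lock_guard<std::mutex> lock(_mutex);
            _stop = true;
        }
        _cond.notify_all();

        if (_thread.joinable())
        {
            _thread.join();
        }
    }

private:
    void loop()
    {
        std::unique_lock<std::mutex> lock(_mutex);

        while (!_stop)
        {
            // Sleep one period, waking early if stop() was requested
            if (_cond.wait_for(lock, std::chrono::duration<double>(_period),
                               [this]{ return _stop; }))
            {
                break;
            }

            lock.unlock();
            _callback();    // e.g. the [this](){timer_action();} lambdas below
            lock.lock();
        }
    }

    double _period;
    std::function<void()> _callback;

    bool _stop = false;
    std::mutex _mutex;
    std::condition_variable _cond;
    std::thread _thread;
};

With a helper of this shape, a member declared as Timer timer_thread and initialized with timer_thread(timer, [this](){timer_action();}) periodically invokes the manager's timer_action(), which is how the refactored constructors below wire it up.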
AclManager.h
@@ -17,21 +17,18 @@
 #ifndef ACL_MANAGER_H_
 #define ACL_MANAGER_H_
 
-#include "AuthManager.h"
+#include "Listener.h"
 #include "AuthRequest.h"
 #include "PoolObjectSQL.h"
-#include "AclRule.h"
-#include "NebulaLog.h"
 
+class AclRule;
 class PoolObjectAuth;
 class SqlDB;
 
-extern "C" void * acl_action_loop(void *arg);
-
 /**
  * This class manages the ACL rules and the authorization engine
  */
-class AclManager : public Callbackable, public ActionListener
+class AclManager : public Callbackable
 {
 public:
 /**
@@ -54,6 +51,8 @@ public:
 
 void finalize();
 
+void join_thread();
+
 /**
  * Reload the ACL rules from the DB. This function needs to be used when
  * a server becomes leader of the zone as the ACL cache maybe out-dated
@@ -215,19 +214,6 @@ public:
  */
 virtual int dump(std::ostringstream& oss);
 
-// -------------------------------------------------------------------------
-// Refresh loop thread
-// -------------------------------------------------------------------------
-/**
- * Gets the AclManager thread identification. The thread is only
- * initialized if the refresh_cache flag is true.
- * @return pthread_t for the manager thread (that in the action loop).
- */
-pthread_t get_thread_id() const
-{
-return acl_thread;
-};
-
 protected:
 /**
  * Constructor for derived ACL managers. Classes derived from this one
@@ -235,9 +221,11 @@ protected:
  * from DB)
  */
 AclManager(int _zone_id)
-:zone_id(_zone_id), db(0), is_federation_slave(false)
+: zone_id(_zone_id)
+, db(0)
+, is_federation_slave(false)
+, timer_period(-1)
 {
-pthread_mutex_init(&mutex, 0);
 };
 
 // -------------------------------------------------------------------------
@@ -346,23 +334,7 @@ private:
 // Mutex synchronization
 // -------------------------------------------------------------------------
 
-pthread_mutex_t mutex;
+std::mutex acl_mutex;
 
-/**
- * Function to lock the manager
- */
-void lock()
-{
-pthread_mutex_lock(&mutex);
-};
-
-/**
- * Function to unlock the manager
- */
-void unlock()
-{
-pthread_mutex_unlock(&mutex);
-};
-
 // -------------------------------------------------------------------------
 // DataBase implementation variables
@@ -429,33 +401,18 @@ private:
 time_t timer_period;
 
 /**
- * Thread id for the ACL Manager
+ * Timer action async execution
  */
-pthread_t acl_thread;
+std::unique_ptr<Timer> timer_thread;
 
-/**
- * Action engine for the Manager
- */
-ActionManager am;
-
-/**
- * Function to execute the Manager action loop method within a new pthread
- * (requires C linkage)
- */
-friend void * acl_action_loop(void *arg);
-
 // -------------------------------------------------------------------------
 // Action Listener interface
 // -------------------------------------------------------------------------
-void timer_action(const ActionRequest& ar)
+void timer_action()
 {
 select();
 };
 
-void finalize_action(const ActionRequest& ar)
-{
-NebulaLog::log("ACL",Log::INFO,"Stopping ACL Manager...");
-};
 };
 
 #endif /*ACL_MANAGER_H*/
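The locking change here is representative of the whole commit: the hand-rolled lock()/unlock() pair around a pthread_mutex_t gives way to a plain std::mutex member that methods lock with a scoped guard. A minimal sketch of the resulting call-site pattern; the class and method names are illustrative, not taken from the diff:

#include <mutex>

class AclStateExample                 // illustrative stand-in for AclManager
{
    std::mutex acl_mutex;

    void update_rules()
    {
        // Replaces pthread_mutex_lock(&mutex) ... pthread_mutex_unlock(&mutex):
        // the guard releases the mutex automatically on every return path.
        std::lock_guard<std::mutex> lock(acl_mutex);

        // ... modify the shared ACL rule set ...
    }
};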
ActionManager.h (deleted)
@@ -1,250 +0,0 @@
-/* -------------------------------------------------------------------------- */
-/* Copyright 2002-2020, OpenNebula Project, OpenNebula Systems */
-/* */
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
-/* not use this file except in compliance with the License. You may obtain */
-/* a copy of the License at */
-/* */
-/* http://www.apache.org/licenses/LICENSE-2.0 */
-/* */
-/* Unless required by applicable law or agreed to in writing, software */
-/* distributed under the License is distributed on an "AS IS" BASIS, */
-/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
-/* See the License for the specific language governing permissions and */
-/* limitations under the License. */
-/* -------------------------------------------------------------------------- */
-
-#ifndef ACTION_MANAGER_H_
-#define ACTION_MANAGER_H_
-
-#include <queue>
-#include <pthread.h>
-#include <ctime>
-#include <string>
-
-/**
- * Represents a generic request, pending actions are stored in a queue.
- * Each element stores the base action type, additional data is added by each
- * ActionListener implementation.
- */
-class ActionRequest
-{
-public:
-/**
- * Base Action types
- */
-enum Type
-{
-FINALIZE,
-TIMER,
-USER
-};
-
-Type type() const
-{
-return _type;
-}
-
-ActionRequest(Type __type): _type(__type){};
-
-virtual ~ActionRequest(){};
-
-virtual ActionRequest * clone() const
-{
-return new ActionRequest(_type);
-}
-
-protected:
-Type _type;
-};
-
-/**
- * ActionListener class. Interface to be implemented by any class that need to
- * handle actions.
- */
-class ActionListener
-{
-protected:
-ActionListener(){};
-
-virtual ~ActionListener(){};
-
-/**
- * the user_action() function is executed upon action arrival.
- * This function should check the action type, and perform the
- * corresponding action.
- * @param ar the ActionRequest
- */
-virtual void user_action(const ActionRequest& ar){};
-
-/**
- * Periodic timer action, executed each time the time_out expires. Listener
- * needs to re-implement the default timer action if needed.
- * @param ar the ActionRequest
- */
-virtual void timer_action(const ActionRequest& ar){};
-
-/**
- * Action executed when the Manager finlizes. Listener needs to re-implement
- * the default action if needed.
- * @param ar the ActionRequest
- */
-virtual void finalize_action(const ActionRequest& ar){};
-
-private:
-friend class ActionManager;
-
-/**
- * Invoke the action handler
- */
-void _do_action(const ActionRequest& ar)
-{
-switch(ar.type())
-{
-case ActionRequest::FINALIZE:
-finalize_action(ar);
-break;
-
-case ActionRequest::TIMER:
-timer_action(ar);
-break;
-
-case ActionRequest::USER:
-user_action(ar);
-break;
-}
-}
-};
-
-
-/**
- * ActionManager. Provides action support for a class implementing
- * the ActionListener interface.
- */
-class ActionManager
-{
-public:
-
-ActionManager();
-
-virtual ~ActionManager();
-
-/**
- * Function to trigger an action to this manager.
- * @param action the action name
- * @param args arguments for the action
- */
-void trigger(const ActionRequest& ar);
-
-/**
- * Trigger the FINALIZE event
- */
-void finalize()
-{
-ActionRequest frequest(ActionRequest::FINALIZE);
-
-trigger(frequest);
-}
-
-/**
- * The calling thread will be suspended until an action is triggered.
- * @param timeout for the periodic action.
- * @param timer_args arguments for the timer action
- */
-void loop(struct timespec& _timeout, const ActionRequest& trequest);
-
-void loop(time_t timeout, const ActionRequest& trequest)
-{
-struct timespec _timeout;
-
-_timeout.tv_sec = timeout;
-_timeout.tv_nsec = 0;
-
-loop(_timeout, trequest);
-}
-
-/**
- * The calling thread will be suspended until an action is triggered.
- * @param timeout for the periodic action, the timer action will recieve
- * an "empty" ActionRequest.
- */
-void loop(time_t timeout)
-{
-ActionRequest trequest(ActionRequest::TIMER);
-
-struct timespec _timeout;
-
-_timeout.tv_sec = timeout;
-_timeout.tv_nsec = 0;
-
-loop(_timeout, trequest);
-}
-
-void loop(struct timespec& _timeout)
-{
-ActionRequest trequest(ActionRequest::TIMER);
-
-loop(_timeout, trequest);
-}
-
-/**
- * The calling thread will be suspended until an action is triggered. No
- * periodic action is defined.
- */
-void loop()
-{
-ActionRequest trequest(ActionRequest::TIMER);
-struct timespec _timeout;
-
-_timeout.tv_sec = 0;
-_timeout.tv_nsec = 0;
-
-loop(_timeout, trequest);
-}
-
-/**
- * Register the calling object in this action manager.
- * @param listener a pointer to the action listner
- */
-void addListener(ActionListener * listener)
-{
-this->listener = listener;
-};
-
-private:
-/**
- * Queue of pending actions, processed in a FIFO manner
- */
-std::queue<ActionRequest *> actions;
-
-/**
- * Action synchronization is implemented using the pthread library,
- * with condition variable and its associated mutex
- */
-pthread_mutex_t mutex;
-pthread_cond_t cond;
-
-/**
- * The listener notified by this manager
- */
-ActionListener * listener;
-
-/**
- * Function to lock the Manager mutex
- */
-void lock()
-{
-pthread_mutex_lock(&mutex);
-};
-
-/**
- * Function to unlock the Manager mutex
- */
-void unlock()
-{
-pthread_mutex_unlock(&mutex);
-};
-
-};
-
-#endif /*ACTION_MANAGER_H_*/
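The deleted ActionManager implemented a pthread-synchronized FIFO of heap-allocated ActionRequest objects that a per-manager thread drained in loop(). The replacement Listener class is not shown in this diff; a plausible minimal equivalent of the queueing model it stands in for enqueues plain std::function<void()> work items for a single worker thread, with an empty function standing in for the old FINALIZE request. This sketch is an assumption about the design, not the code added by the commit:

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class WorkQueue                        // illustrative name
{
public:
    void trigger(std::function<void()> work)
    {
        {
            std::lock_guard<std::mutex> lock(_mutex);
            _queue.push(std::move(work));
        }
        _cond.notify_one();
    }

    void finalize() { trigger(nullptr); }   // empty work item ends the loop

    void loop()                        // run by the manager thread
    {
        while (true)
        {
            std::function<void()> work;

            {
                std::unique_lock<std::mutex> lock(_mutex);
                _cond.wait(lock, [this]{ return !_queue.empty(); });

                work = std::move(_queue.front());
                _queue.pop();
            }

            if (!work)                 // empty function == finalize
            {
                break;
            }

            work();
        }
    }

private:
    std::queue<std::function<void()>> _queue;
    std::mutex _mutex;
    std::condition_variable _cond;
};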
AuthManager.h
@@ -20,60 +20,17 @@
 #include <time.h>
 
 #include "NebulaLog.h"
-#include "ActionManager.h"
+#include "Listener.h"
 #include "ProtocolMessages.h"
 #include "DriverManager.h"
 
 //Forward definitions
 class AuthRequest;
 
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-class AMAction : public ActionRequest
-{
-public:
-enum Actions
-{
-AUTHENTICATE,
-AUTHORIZE
-};
-
-AMAction(Actions a, AuthRequest *r):ActionRequest(ActionRequest::USER),
-_action(a), _request(r) {}
-
-AMAction(const AMAction& o):ActionRequest(o._type), _action(o._action),
-_request(o._request) {}
-
-Actions action() const
-{
-return _action;
-}
-
-AuthRequest * request() const
-{
-return _request;
-}
-
-ActionRequest * clone() const
-{
-return new AMAction(*this);
-}
-
-private:
-Actions _action;
-
-AuthRequest * _request;
-};
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-extern "C" void * authm_action_loop(void *arg);
-
 class AuthManager :
 public DriverManager<Driver<auth_msg_t>>,
-public ActionListener
+public Listener
 {
 public:
 
@@ -81,24 +38,9 @@ public:
 time_t timer,
 const std::string& mads_location):
 DriverManager(mads_location),
-timer_period(timer)
+Listener("Authorization Manager"),
+timer_thread(timer, [this](){timer_action();})
 {
-am.addListener(this);
-}
-
-~AuthManager() {}
-
-/**
- * Triggers specific actions to the Auth Manager. This function
- * wraps the ActionManager trigger function.
- * @param action the Auth Manager action
- * @param request an auth request
- */
-void trigger(AMAction::Actions action, AuthRequest* request)
-{
-AMAction auth_ar(action, request);
-
-am.trigger(auth_ar);
 }
 
 /**
@@ -109,29 +51,12 @@ public:
  */
 int start();
 
-/**
- *
- */
-void finalize()
-{
-am.finalize();
-}
-
 /**
  * Loads Virtual Machine Manager Mads defined in configuration file
  * @param _mads configuration of drivers
  */
 int load_drivers(const std::vector<const VectorAttribute*>& _mads);
 
-/**
- * Gets the thread identification.
- * @return pthread_t for the manager thread (that in the action loop).
- */
-pthread_t get_thread_id() const
-{
-return authm_thread;
-}
-
 /**
  * Returns true if there is an authorization driver enabled
  *
@@ -142,21 +67,21 @@ public:
 return authz_enabled;
 }
 
+/**
+ * This function authenticates a user
+ */
+void trigger_authenticate(AuthRequest& ar);
+
+/**
+ * This function authorizes a user request
+ */
+void trigger_authorize(AuthRequest& ar);
+
 private:
 /**
- * Thread id for the Transfer Manager
+ * Timer action async execution
  */
-pthread_t authm_thread;
+Timer timer_thread;
 
-/**
- * Action engine for the Manager
- */
-ActionManager am;
-
-/**
- * Timer for the Manager (periocally triggers timer action)
- */
-time_t timer_period;
-
 /**
  * Generic name for the Auth driver
@@ -168,6 +93,11 @@ private:
  */
 bool authz_enabled;
 
+/**
+ *
+ */
+static const int drivers_timeout = 10;
+
 /**
  * Returns a pointer to a Auth Manager driver.
  * @param name of an attribute of the driver (e.g. its type)
@@ -192,22 +122,6 @@ private:
 return DriverManager::get_driver(auth_driver_name);
 }
 
-/**
- * This function authenticates a user
- */
-void authenticate_action(AuthRequest * ar);
-
-/**
- * This function authorizes a user request
- */
-void authorize_action(AuthRequest * ar);
-
-/**
- * Function to execute the Manager action loop method within a new pthread
- * (requires C linkage)
- */
-friend void * authm_action_loop(void *arg);
-
 // -------------------------------------------------------------------------
 // Protocol implementation, procesing messages from driver
 // -------------------------------------------------------------------------
@@ -234,21 +148,12 @@ private:
 // -------------------------------------------------------------------------
 // Action Listener interface
 // -------------------------------------------------------------------------
-void timer_action(const ActionRequest& ar)
+void timer_action()
 {
 check_time_outs_action();
 }
 
-static const int drivers_timeout = 10;
+void finalize_action() override;
 
-void finalize_action(const ActionRequest& ar)
-{
-NebulaLog::log("AuM",Log::INFO,"Stopping Authorization Manager...");
-
-DriverManager::stop(drivers_timeout);
-}
-
-void user_action(const ActionRequest& ar);
 };
 
 #endif /*AUTH_MANAGER_H*/
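At the call sites the enum-and-queue indirection disappears: code that previously built an AMAction and pushed it through the ActionManager now calls the specific method on the manager directly. An illustrative before/after; the `authm` pointer is a stand-in, not code from this commit:

// Before: enum-dispatched request routed through user_action()
// authm->trigger(AMAction::AUTHENTICATE, &ar);
// authm->trigger(AMAction::AUTHORIZE, &ar);

// After: explicit methods, taking the request by reference
// authm->trigger_authenticate(ar);
// authm->trigger_authorize(ar);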
AuthRequest.h
@@ -19,13 +19,13 @@
 
 #include <set>
 
-#include "ActionManager.h"
 #include "PoolObjectAuth.h"
-#include "AuthManager.h"
 #include "NebulaUtil.h"
 
 #include "SyncRequest.h"
 
+class AuthManager;
+
 /**
  * The AuthRequest class is used to pass an Authorization or Authentication
  * request to the AuthManager. The result of the request will be stored
DispatchManager.h
@@ -17,11 +17,7 @@
 #ifndef DISPATCH_MANAGER_H_
 #define DISPATCH_MANAGER_H_
 
-#include "ActionManager.h"
-#include "VirtualMachinePool.h"
+#include "Listener.h"
 
-
-extern "C" void * dm_action_loop(void *arg);
-
 //Forward definitions
 class TransferManager;
@@ -30,63 +26,23 @@ class VirtualMachineManager;
 class ImageManager;
 class ClusterPool;
 class HostPool;
+class VirtualMachinePool;
 class VirtualRouterPool;
 class UserPool;
+class VirtualMachine;
+class VirtualMachineTemplate;
 
 struct RequestAttributes;
 
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-class DMAction : public ActionRequest
-{
-public:
-enum Actions
-{
-SUSPEND_SUCCESS, /**< Send by LCM when a VM is suspended*/
-STOP_SUCCESS, /**< Send by LCM when a VM is stopped*/
-UNDEPLOY_SUCCESS, /**< Send by LCM when a VM is undeployed and saved*/
-POWEROFF_SUCCESS, /**< Send by LCM when a VM is powered off */
-DONE, /**< Send by LCM when a VM is shut down*/
-RESUBMIT /**< Send by LCM when a VM is ready for resubmission*/
-};
-
-DMAction(Actions a, int v):ActionRequest(ActionRequest::USER),
-_action(a), _vm_id(v){}
-
-DMAction(const DMAction& o):ActionRequest(o._type), _action(o._action),
-_vm_id(o._vm_id){}
-
-Actions action() const
-{
-return _action;
-}
-
-int vm_id() const
-{
-return _vm_id;
-}
-
-ActionRequest * clone() const
-{
-return new DMAction(*this);
-}
-
-private:
-Actions _action;
-
-int _vm_id;
-};
-
-class DispatchManager : public ActionListener
+class DispatchManager : public Listener
 {
 public:
 
-DispatchManager():
-hpool(0), vmpool(0), clpool(0), vrouterpool(0), tm(0), vmm(0), lcm(0), imagem(0)
+DispatchManager()
+: Listener("Dispatch Manager")
 {
-am.addListener(this);
-};
+}
 
 ~DispatchManager() = default;
 
@@ -96,25 +52,6 @@ public:
  */
 void init_managers();
 
-/**
- * Triggers specific actions to the Dispatch Manager. This function
- * wraps the ActionManager trigger function.
- * @param action the DM action
- * @param vid VM unique id. This is the argument of the passed to the
- * invoked action.
- */
-void trigger(DMAction::Actions action, int vid)
-{
-DMAction dm_ar(action, vid);
-
-am.trigger(dm_ar);
-}
-
-void finalize()
-{
-am.finalize();
-}
-
 /**
  * This functions creates a new thread for the Dispatch Manager. This
  * thread will wait in an action loop till it receives ACTION_FINALIZE.
@@ -122,15 +59,6 @@ public:
  */
 int start();
 
-/**
- * Gets the thread identification.
- * @return pthread_t for the manager thread (that in the action loop).
- */
-pthread_t get_thread_id() const
-{
-return dm_thread;
-};
-
 //--------------------------------------------------------------------------
 // DM Actions, the RM and the Scheduler will invoke this methods
 //--------------------------------------------------------------------------
@@ -272,20 +200,7 @@ public:
 /**
  * VM ID interface
  */
-int delete_vm(int vid, const RequestAttributes& ra, std::string& error_str)
-{
-VirtualMachine * vm;
-
-vm = vmpool->get(vid);
-
-if ( vm == 0 )
-{
-error_str = "Virtual machine does not exist";
-return -1;
-}
-
-return delete_vm(vm, ra, error_str);
-}
+int delete_vm(int vid, const RequestAttributes& ra, std::string& error_str);
 
 /**
  * Moves a VM to PENDING state preserving any resource (i.e. leases) and id
@@ -504,99 +419,73 @@ public:
 int live_updateconf(int vid, const RequestAttributes& ra,
 std::string& error_str);
 
-private:
-/**
- * Thread id for the Dispatch Manager
- */
-pthread_t dm_thread;
+//--------------------------------------------------------------------------
+// DM Actions associated with a VM state transition
+//--------------------------------------------------------------------------
 
+void trigger_suspend_success(int vid);
+
+void trigger_stop_success(int vid);
+
+void trigger_undeploy_success(int vid);
+
+void trigger_poweroff_success(int vid);
+
+void trigger_done(int vid);
+
+void trigger_resubmit(int vid);
+
+private:
 /**
  * Pointer to the Host Pool, to access hosts
  */
-HostPool * hpool;
+HostPool * hpool = nullptr;
 
 /**
  * Pointer to the Virtual Machine Pool, to access VMs
  */
-VirtualMachinePool * vmpool;
+VirtualMachinePool * vmpool = nullptr;
 
 /**
  * Pointer to the User Pool, to access user
  */
-UserPool * upool;
+UserPool * upool = nullptr;
 
 /**
  * Pointer to the Cluster Pool
  */
-ClusterPool * clpool;
+ClusterPool * clpool = nullptr;
 
 /**
  * Pointer to the Virtual Router Pool
  */
-VirtualRouterPool * vrouterpool;
+VirtualRouterPool * vrouterpool = nullptr;
 
 /**
  * Pointer to TransferManager
  */
-TransferManager * tm;
+TransferManager * tm = nullptr;
 
 /**
  * Pointer to VirtualMachineManager
  */
-VirtualMachineManager * vmm;
+VirtualMachineManager * vmm = nullptr;
 
 /**
  * Pointer to LifeCycleManager
  */
-LifeCycleManager * lcm;
+LifeCycleManager * lcm = nullptr;
 
 /**
  * Pointer to ImageManager
  */
-ImageManager * imagem;
+ImageManager * imagem = nullptr;
 
-/**
- * Action engine for the Manager
- */
-ActionManager am;
-
 /**
  * Frees the resources associated to a VM: disks, ip addresses and Quotas
  */
 void free_vm_resources(VirtualMachine * vm, bool check_images);
 
-//--------------------------------------------------------------------------
-// DM Actions associated with a VM state transition
-//--------------------------------------------------------------------------
-
-void suspend_success_action(int vid);
-
-void stop_success_action(int vid);
-
-void undeploy_success_action(int vid);
-
-void poweroff_success_action(int vid);
-
-void done_action(int vid);
-
-void resubmit_action(int vid);
-
-/**
- * Function to execute the Manager action loop method within a new pthread
- * (requires C linkage)
- */
-friend void * dm_action_loop(void *arg);
-
-// -------------------------------------------------------------------------
-// Action Listener interface
-// -------------------------------------------------------------------------
-void finalize_action(const ActionRequest& ar)
-{
-NebulaLog::log("DiM",Log::INFO,"Stopping Dispatch Manager...");
-};
-
-void user_action(const ActionRequest& ar);
-
 /**
  * Fill a template only with the necessary attributes to update the quotas
  * @param vm with the attributes
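DispatchManager also shows the second recurring cleanup in this commit: raw pointer members pick up in-class default initializers, so the long hpool(0), vmpool(0), ... constructor initializer list disappears. A minimal sketch of the pattern, trimmed to two members; the class name is illustrative:

class HostPool;                        // forward declarations, as in the header
class VirtualMachinePool;

class ManagerExample                   // illustrative stand-in for DispatchManager
{
private:
    // Each pointer starts as nullptr without being listed in the constructor;
    // init_managers() (or equivalent) assigns the real pools later.
    HostPool *           hpool  = nullptr;
    VirtualMachinePool * vmpool = nullptr;
};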
FedReplicaManager.h
@@ -20,16 +20,15 @@
 #include <string>
 #include <map>
 #include <vector>
+#include <mutex>
 
 #include "ReplicaManager.h"
-#include "ActionManager.h"
 
-extern "C" void * frm_loop(void *arg);
-
 class LogDB;
 class LogDBRecord;
 
-class FedReplicaManager : public ReplicaManager, ActionListener
+class FedReplicaManager : public ReplicaManager
 {
 public:
 
@@ -83,19 +82,6 @@ public:
 int xmlrpc_replicate_log(int zone_id, bool& success, uint64_t& last,
 std::string& err);
 
-/**
- * Finalizes the Federation Replica Manager
- */
-void finalize()
-{
-am.finalize();
-}
-
-/**
- * Starts the Federation Replica Manager
- */
-int start();
-
 /**
  * Start the replication threads, and updates the server list of the zone
  */
@@ -130,31 +116,16 @@ public:
  */
 void delete_zone(int zone_id);
 
-/**
- * @return the id of fed. replica thread
- */
-pthread_t get_thread_id() const
-{
-return frm_thread;
-};
-
 private:
-friend void * frm_loop(void *arg);
-
 /**
  * Creates federation replica thread objects
  */
 ReplicaThread * thread_factory(int follower_id);
 
-/**
- * Thread id of the main event loop
- */
-pthread_t frm_thread;
-
 /**
  * Controls access to the zone list and server data
  */
-pthread_mutex_t mutex;
+std::mutex fed_mutex;
 
 // -------------------------------------------------------------------------
 // Synchronization variables
@@ -185,16 +156,6 @@ private:
 
 LogDB * logdb;
 
-// -------------------------------------------------------------------------
-// Action Listener interface
-// -------------------------------------------------------------------------
-ActionManager am;
-
-/**
- * Termination function
- */
-void finalize_action(const ActionRequest& ar);
-
 /**
  * Get the nerxt record to replicate in a zone
  * @param zone_id of the zone
HookLog.h
@@ -19,16 +19,10 @@
 
 #include <string>
 
-#include "ActionManager.h"
 #include "Attribute.h"
 
 class SqlDB;
 
-/**
- * Thread loop (timer action to purge the log)
- */
-extern "C" void * hlog_action_loop(void *arg);
-
 /**
  * This class represents the execution log of Hooks. It writes/reads execution
  * records in the DB.
HookManager.h
@@ -19,63 +19,23 @@
 
 #include "ProtocolMessages.h"
 #include "DriverManager.h"
-#include "ActionManager.h"
+#include "Listener.h"
 
 #include <vector>
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-class HMAction : public ActionRequest
-{
-public:
-enum Actions
-{
-SEND_EVENT, /**< Send event to hook manager driver*/
-RETRY /**< Send RETRY action to hook manager driver*/
-};
-
-HMAction(Actions a, const std::string& m):ActionRequest(ActionRequest::USER),
-_action(a), _message(m){};
-
-HMAction(const HMAction& o):ActionRequest(o._type), _action(o._action),
-_message(o._message){};
-
-Actions action() const
-{
-return _action;
-}
-
-const std::string& message() const
-{
-return _message;
-}
-
-ActionRequest * clone() const
-{
-return new HMAction(*this);
-}
-
-private:
-Actions _action;
-
-std::string _message;
-};
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-extern "C" void * hm_action_loop(void *arg);
-
 class HookManager :
 public DriverManager<Driver<hook_msg_t>>,
-public ActionListener
+public Listener
 {
 public:
 
-HookManager(const std::string& mad_location): DriverManager(mad_location)
+HookManager(const std::string& mad_location)
+: DriverManager(mad_location)
+, Listener("Hook Manager")
 {
-am.addListener(this);
 }
 
 virtual ~HookManager() = default;
@@ -88,41 +48,12 @@ public:
  */
 int start();
 
-/**
- * Gets the HookManager thread identification.
- * @return pthread_t for the manager thread (that in the action loop).
- */
-pthread_t get_thread_id() const
-{
-return hm_thread;
-};
-
 /**
  * Loads Hook Manager Mads defined in configuration file
  * @param _mads configuration of drivers
  */
 int load_drivers(const std::vector<const VectorAttribute*>& _mads);
 
-/**
- * Triggers specific actions to the Hook Manager.
- * @param action the HM action
- * @param message to send to the driver
- */
-void trigger(HMAction::Actions action, const std::string& message)
-{
-HMAction hm_ar(action, message);
-
-am.trigger(hm_ar);
-}
-
-/**
- * Terminates the hook manager thread listener
- */
-void finalize()
-{
-am.finalize();
-}
-
 /**
  * Returns a pointer to a Information Manager MAD. The driver is
  * searched by its name and owned by oneadmin with uid=0.
@@ -139,40 +70,24 @@ public:
 const std::string& remote_host,
 int hook_id);
 
-private:
-/**
- * Function to execute the Manager action loop method within a new pthread
- * (requires C linkage)
- */
-friend void * hm_action_loop(void *arg);
-
 /**
  * Generic name for the Hook driver
  */
 static const char * hook_driver_name;
 
-/**
- * Thread id for the HookManager
- */
-pthread_t hm_thread;
-
-/**
- * Action engine for the Manager
- */
-ActionManager am;
-
 /**
  * Send event message to the driver
  * @param message to pass to the driver
  */
-void send_event_action(const std::string& message);
+void trigger_send_event(const std::string& message);
 
 /**
  * Send retry message to the driver
  * @param message to pass to the driver
  */
-void retry_action(const std::string& message);
+void trigger_retry(const std::string& message);
 
+private:
 // -------------------------------------------------------------------------
 // Protocol implementation, procesing messages from driver
 // -------------------------------------------------------------------------
@@ -201,14 +116,10 @@ private:
 // -------------------------------------------------------------------------
 static const int drivers_timeout = 10;
 
-void finalize_action(const ActionRequest& ar)
+void finalize_action() override
 {
-NebulaLog::log("HKM",Log::INFO,"Stopping Hook Manager...");
-
 DriverManager::stop(drivers_timeout);
 };
 
-void user_action(const ActionRequest& ar);
-
 };
 
 #endif /*HOOK_MANAGER_H*/
IPAMManager.h
@@ -21,7 +21,7 @@
 
 #include "ProtocolMessages.h"
 #include "DriverManager.h"
-#include "ActionManager.h"
+#include "Listener.h"
 #include "NebulaLog.h"
 
 //Forward definitions
@@ -31,77 +31,21 @@ class VectorAttribute;
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-class IPMAction : public ActionRequest
-{
-public:
-enum Actions
-{
-REGISTER_ADDRESS_RANGE, /**< Register/Request a new IP network */
-UNREGISTER_ADDRESS_RANGE, /**< Unregister IP network */
-ALLOCATE_ADDRESS, /**< Request a specific IP (or range) */
-GET_ADDRESS, /**< Request any free IP (or range) */
-FREE_ADDRESS /**< Frees a previously requested IP */
-};
-
-IPMAction(Actions a, IPAMRequest *r):ActionRequest(ActionRequest::USER),
-_action(a), _request(r){};
-
-IPMAction(const IPMAction& o):ActionRequest(o._type), _action(o._action),
-_request(o._request){};
-
-Actions action() const
-{
-return _action;
-}
-
-IPAMRequest * request() const
-{
-return _request;
-}
-
-ActionRequest * clone() const
-{
-return new IPMAction(*this);
-}
-
-private:
-Actions _action;
-
-IPAMRequest * _request;
-};
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-extern "C" void * ipamm_action_loop(void *arg);
-
 class IPAMManager :
 public DriverManager<Driver<ipam_msg_t>>,
-public ActionListener
+public Listener
 {
 public:
 
-IPAMManager(time_t timer, const std::string mad_location):
-DriverManager(mad_location), timer_period(timer)
+IPAMManager(time_t timer, const std::string mad_location)
+: DriverManager(mad_location)
+, Listener("IPAM Manager")
+, timer_thread(timer, [this](){timer_action();})
 {
-am.addListener(this);
-};
-
-~IPAMManager(){};
-
-/**
- * Triggers specific action to the IPAM Manager. This function
- * wraps the ActionManager trigger function.
- * @param action to the IPAM Manager action
- * @param request an IPAM request
- */
-void trigger(IPMAction::Actions action, IPAMRequest* request)
-{
-IPMAction ipam_ar(action, request);
-
-am.trigger(ipam_ar);
 }
 
+~IPAMManager() = default;
+
 /**
  * This functions starts the associated listener thread, and creates a
  * new thread for the IPAMManager. This thread will wait in
@@ -117,37 +61,35 @@ public:
 int load_drivers(const std::vector<const VectorAttribute*>& _mads);
 
 /**
- * Gets the thread identification.
- * @return pthread_t for the manager thread (that in the action loop).
+ * Register (or requests) a new address range to the IPAM.
  */
-pthread_t get_thread_id() const
-{
-return ipamm_thread;
-};
+void trigger_register_address_range(IPAMRequest& ir);
 
 /**
- * Finalizes the IPAM Manager
+ * Unregisters an address range.
  */
-void finalize()
-{
-am.finalize();
-};
+void trigger_unregister_address_range(IPAMRequest& ir);
+
+/**
+ * Requests the IPAM a free address (or range)
+ */
+void trigger_get_address(IPAMRequest& ir);
+
+/**
+ * Requests to set an address (or range) as used
+ */
+void trigger_allocate_address(IPAMRequest& ir);
+
+/**
+ * Free an address in the IPAM
+ */
+void trigger_free_address(IPAMRequest& ir);
 
 private:
 /**
- * Thread id for the IPAM Manager
+ * Timer action async execution
  */
-pthread_t ipamm_thread;
+Timer timer_thread;
 
-/**
- * Action engine for the Manager
- */
-ActionManager am;
-
-/**
- * Timer for the Manager (periocally triggers timer action)
- */
-time_t timer_period;
-
 /**
  * Generic name for the IPAM driver
@@ -164,45 +106,14 @@ private:
 const Driver<ipam_msg_t> * get() const
 {
 return DriverManager::get_driver(ipam_driver_name);
-};
+}
 
-/**
- * Register (or requests) a new address range to the IPAM.
- */
-void register_address_range_action(IPAMRequest * ir);
-
-/**
- * Unregisters an address range.
- */
-void unregister_address_range_action(IPAMRequest * ir);
-
-/**
- * Requests the IPAM a free address (or range)
- */
-void get_address_action(IPAMRequest * ir);
-
-/**
- * Requests to set an address (or range) as used
- */
-void allocate_address_action(IPAMRequest * ir);
-
-/**
- * Free an address in the IPAM
- */
-void free_address_action(IPAMRequest * ir);
-
 /**
  * This function initializes a request to call the IPAM driver
  * @param ir the IPAM request
 * @return pointer to the IPAM driver to use, 0 on failure
 */
-void send_request(IPAMManagerMessages type, IPAMRequest * ir);
+void send_request(IPAMManagerMessages type, IPAMRequest& ir);
 
-/**
- * Function to execute the Manager action loop method within a new pthread
- * (requires C linkage)
- */
-friend void * ipamm_action_loop(void *arg);
-
 // -------------------------------------------------------------------------
 // Protocol implementation, procesing messages from driver
@@ -225,21 +136,17 @@ private:
 // -------------------------------------------------------------------------
 // Action Listener interface
 // -------------------------------------------------------------------------
-void timer_action(const ActionRequest& ar)
+void timer_action()
 {
 check_time_outs_action();
-};
+}
 
 static const int drivers_timeout = 10;
 
-void finalize_action(const ActionRequest& ar)
+void finalize_action() override
 {
-NebulaLog::log("IPM",Log::INFO,"Stopping IPAM Manager...");
-
 DriverManager::stop(drivers_timeout);
-};
+}
 
-void user_action(const ActionRequest& ar);
-
 };
 
 #endif /*IPAM_MANAGER_H*/
@ -19,10 +19,7 @@
|
|||||||
|
|
||||||
#include "DriverManager.h"
|
#include "DriverManager.h"
|
||||||
#include "ProtocolMessages.h"
|
#include "ProtocolMessages.h"
|
||||||
#include "ActionManager.h"
|
#include "Listener.h"
|
||||||
#include "NebulaLog.h"
|
|
||||||
|
|
||||||
extern "C" void * image_action_loop(void *arg);
|
|
||||||
|
|
||||||
class DatastorePool;
|
class DatastorePool;
|
||||||
class Image;
|
class Image;
|
||||||
@ -31,9 +28,7 @@ class Snapshots;
|
|||||||
class Template;
|
class Template;
|
||||||
|
|
||||||
|
|
||||||
class ImageManager :
|
class ImageManager : public DriverManager<Driver<image_msg_t>>
|
||||||
public DriverManager<Driver<image_msg_t>>,
|
|
||||||
public ActionListener
|
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
|
||||||
@ -44,16 +39,16 @@ public:
|
|||||||
const std::string& _mads_location,
|
const std::string& _mads_location,
|
||||||
int _monitor_vm_disk):
|
int _monitor_vm_disk):
|
||||||
DriverManager(_mads_location),
|
DriverManager(_mads_location),
|
||||||
timer_period(_timer_period),
|
timer_thread(_timer_period, [this](){timer_action();}),
|
||||||
|
timer_period(_monitor_period),
|
||||||
monitor_period(_monitor_period),
|
monitor_period(_monitor_period),
|
||||||
monitor_vm_disk(_monitor_vm_disk),
|
monitor_vm_disk(_monitor_vm_disk),
|
||||||
ipool(_ipool),
|
ipool(_ipool),
|
||||||
dspool(_dspool)
|
dspool(_dspool)
|
||||||
{
|
{
|
||||||
am.addListener(this);
|
}
|
||||||
};
|
|
||||||
|
|
||||||
~ImageManager(){};
|
~ImageManager() = default;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This functions starts the associated listener thread, and creates a
|
* This functions starts the associated listener thread, and creates a
|
||||||
@ -63,29 +58,19 @@ public:
|
|||||||
*/
|
include/ImageManager.h
      */
     int start();

+    void finalize()
+    {
+        timer_thread.stop();
+
+        stop(drivers_timeout);
+    };

     /**
      *  Loads the Image Driver defined in configuration file
      *    @param _mads configuration of drivers
      */
     int load_drivers(const std::vector<const VectorAttribute*>& _mads);

-    /**
-     *  Gets the thread identification.
-     *    @return pthread_t for the manager thread (that in the action loop).
-     */
-    pthread_t get_thread_id() const
-    {
-        return imagem_thread;
-    };
-
-    /**
-     *  Finalizes the Image Manager
-     */
-    void finalize()
-    {
-        am.finalize();
-    };

     /**************************************************************************/
     /* Image Manager Actions                                                  */
     /* Operates in a semi-sinchronous mode. Operations will be granted or not */

@@ -315,9 +300,9 @@ private:
     static const char * image_driver_name;

     /**
-     *  Thread id for the Image Manager
+     *  Timer action async execution
      */
-    pthread_t imagem_thread;
+    Timer timer_thread;

     /**
      *  Timer period for the Image Manager.

@@ -346,9 +331,9 @@ private:
     DatastorePool * dspool;

     /**
-     *  Action engine for the Manager
+     *
      */
-    ActionManager am;
+    static const int drivers_timeout = 10;

     /**
      *  Returns a pointer to the Image Manager Driver used for the Repository

@@ -359,12 +344,6 @@ private:
         return DriverManager::get_driver(image_driver_name);
     };

-    /**
-     *  Function to execute the Manager action loop method within a new pthread
-     *  (requires C linkage)
-     */
-    friend void * image_action_loop(void *arg);
-
     /**
      *  The action function executed when an action is triggered.
      *    @param action the name of the action

@@ -457,21 +436,12 @@ private:
      */
     static void _log(std::unique_ptr<image_msg_t> msg);

-    // -------------------------------------------------------------------------
-    // Action Listener interface
-    // -------------------------------------------------------------------------
     /**
      *  This function is executed periodically to monitor Datastores.
      */
-    void timer_action(const ActionRequest& ar);
+    void timer_action();

-    static const int drivers_timeout = 10;
-
-    void finalize_action(const ActionRequest& ar)
-    {
-        NebulaLog::log("ImM",Log::INFO,"Stopping Image Manager...");
-        stop(drivers_timeout);
-    };
 };

 #endif /*IMAGE_MANAGER_H*/
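A minimal sketch (not part of the patch) of the pattern the manager above now follows: a Timer member drives the periodic timer_action() that the old ActionManager loop used to schedule. PeriodicExample, the 1-second period and the log message are illustrative only; Listener.h is the header added later in this commit and must be available on the include path.

    // Illustrative sketch only; builds inside the OpenNebula source tree.
    #include <chrono>
    #include <iostream>
    #include <thread>

    #include "Listener.h"   // provides the Timer helper added by this commit

    class PeriodicExample
    {
    public:
        PeriodicExample()
            : timer_thread(1, [this]{ timer_action(); })  // tick every second
        {
        }

    private:
        void timer_action()  // runs on the Timer's own thread
        {
            std::cout << "periodic monitor tick" << std::endl;
        }

        Timer timer_thread;  // ~Timer() stops the thread and joins it
    };

    int main()
    {
        PeriodicExample example;

        std::this_thread::sleep_for(std::chrono::seconds(3));  // let a few ticks run
        return 0;  // example goes out of scope; its Timer shuts down cleanly
    }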
include/InformationManager.h
@@ -18,7 +18,7 @@
 #define INFORMATION_MANAGER_H_

 #include "DriverManager.h"
-#include "ActionManager.h"
+#include "Listener.h"
 #include "ProtocolMessages.h"
 #include "RaftManager.h"

@@ -26,9 +26,7 @@ class HostPool;
 class Host;
 class VirtualMachinePool;

-class InformationManager :
-    public DriverManager<Driver<im_msg_t>>,
-    public ActionListener
+class InformationManager : public DriverManager<Driver<im_msg_t>>
 {
 public:
     InformationManager(

@@ -39,7 +37,6 @@ public:
         , hpool(_hpool)
         , vmpool(_vmpool)
     {
-        am.addListener(this);
     }

     ~InformationManager() = default;

@@ -52,20 +49,9 @@ public:
      */
     int start();

-    /**
-     *  Join the action loop thread
-     */
-    void join_thread()
-    {
-        return im_thread.join();
-    };
-
-    /**
-     *
-     */
     void finalize()
     {
-        am.finalize();
+        stop(drivers_timeout);
     };

     /**

@@ -123,11 +109,6 @@ protected:
     void _vm_state(std::unique_ptr<im_msg_t> msg);

 private:
-    /**
-     *  Thread for the Information Manager
-     */
-    std::thread im_thread;
-
     /**
      *  Pointer to the Host Pool
      */

@@ -138,26 +119,11 @@ private:
      */
     VirtualMachinePool * vmpool;

-    /**
-     *  Action engine for the Manager
-     */
-    ActionManager am;
-
     /**
      *  Default timeout to wait for Information Driver (monitord)
      */
     static const int drivers_timeout = 10;

-    // ------------------------------------------------------------------------
-    // ActioListener Interface
-    // ------------------------------------------------------------------------
-    void finalize_action(const ActionRequest& ar) override
-    {
-        NebulaLog::log("InM",Log::INFO,"Stopping Information Manager...");
-
-        stop(drivers_timeout);
-    };
 };

-#endif /*VIRTUAL_MACHINE_MANAGER_H*/
+#endif /*INFORMATION_MANAGER_H_*/
include/LifeCycleManager.h
@@ -17,10 +17,8 @@
 #ifndef LIFE_CYCLE_MANAGER_H_
 #define LIFE_CYCLE_MANAGER_H_

-#include "ActionManager.h"
-#include "NebulaLog.h"
-
-extern "C" void * lcm_action_loop(void *arg);
+#include "Listener.h"
+#include "VMActions.h"

 //Forward definitions
 class TransferManager;

@@ -38,158 +36,21 @@ struct RequestAttributes;
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-class LCMAction : public ActionRequest
-{
-public:
-    enum Actions
-    {
-        NONE,
-        SAVE_SUCCESS,     /**< Sent by the VMM when a save action succeeds */
-        SAVE_FAILURE,     /**< Sent by the VMM when a save action fails */
-        DEPLOY_SUCCESS,   /**< Sent by the VMM deploy/restore/migrate succeeds*/
-        DEPLOY_FAILURE,   /**< Sent by the VMM deploy/restore/migrate fails */
-        SHUTDOWN_SUCCESS, /**< Sent by the VMM when a shutdown action succeeds*/
-        SHUTDOWN_FAILURE, /**< Sent by the VMM when a shutdown action fails */
-        CANCEL_SUCCESS,   /**< Sent by the VMM when a cancel action succeeds */
-        CANCEL_FAILURE,   /**< Sent by the VMM when a cancel action fails */
-        MONITOR_SUSPEND,  /**< Sent by the VMM when a VM is paused in active */
-        MONITOR_DONE,     /**< Sent by the VMM when a Host cannot be monitored*/
-        MONITOR_POWEROFF, /**< Sent by the VMM when a VM is not found */
-        MONITOR_POWERON,  /**< Sent by the VMM when a VM is found again */
-        PROLOG_SUCCESS,   /**< Sent by the TM when the prolog phase succeeds */
-        PROLOG_FAILURE,   /**< Sent by the TM when the prolog phase fails */
-        EPILOG_SUCCESS,   /**< Sent by the TM when the epilog phase succeeds */
-        EPILOG_FAILURE,   /**< Sent by the TM when the epilog phase fails */
-        ATTACH_SUCCESS,   /**< Sent by the VMM when an attach action succeeds */
-        ATTACH_FAILURE,   /**< Sent by the VMM when an attach action fails */
-        DETACH_SUCCESS,   /**< Sent by the VMM when a detach action succeeds */
-        DETACH_FAILURE,   /**< Sent by the VMM when a detach action fails */
-        ATTACH_NIC_SUCCESS,/**< Sent by the VMM when attach nic action succeeds*/
-        ATTACH_NIC_FAILURE,/**< Sent by the VMM when attach nic action fails */
-        DETACH_NIC_SUCCESS,/**< Sent by the VMM when detach nic action succeeds*/
-        DETACH_NIC_FAILURE,/**< Sent by the VMM when detach nic action fails */
-        CLEANUP_SUCCESS,  /**< Sent by the VMM when a cleanup action succeeds */
-        CLEANUP_FAILURE,  /**< Sent by the VMM when a cleanup action fails */
-        SAVEAS_SUCCESS,   /**< Sent by the VMM when saveas succeeds */
-        SAVEAS_FAILURE,   /**< Sent by the VMM when saveas fails */
-        SNAPSHOT_CREATE_SUCCESS, /**< Sent by the VMM on snap. create success */
-        SNAPSHOT_CREATE_FAILURE, /**< Sent by the VMM on snap. create failure */
-        SNAPSHOT_REVERT_SUCCESS, /**< Sent by the VMM on snap. revert success */
-        SNAPSHOT_REVERT_FAILURE, /**< Sent by the VMM on snap. revert failure */
-        SNAPSHOT_DELETE_SUCCESS, /**< Sent by the VMM on snap. revert success */
-        SNAPSHOT_DELETE_FAILURE, /**< Sent by the VMM on snap. revert failure */
-        DISK_SNAPSHOT_SUCCESS, /**< Sent by TM when a snap. succeeds */
-        DISK_SNAPSHOT_FAILURE, /**< Sent by TM when a snap. fails */
-        DEPLOY,           /**< Sent by the DM to deploy a VM on a host */
-        SUSPEND,          /**< Sent by the DM to suspend an running VM */
-        RESTORE,          /**< Sent by the DM to restore a suspended VM */
-        STOP,             /**< Sent by the DM to stop an running VM */
-        CANCEL,           /**< Sent by the DM to cancel an running VM */
-        MIGRATE,          /**< Sent by the DM to migrate a VM to other host */
-        LIVE_MIGRATE,     /**< Sent by the DM to live-migrate a VM */
-        POFF_MIGRATE,     /**< Sent by the DM to migrate a VM in a poff cycle */
-        POFF_HARD_MIGRATE,/**< Sent by the DM to migrate a VM in a poff hard cycle */
-        SHUTDOWN,         /**< Sent by the DM to shutdown a running VM */
-        UNDEPLOY,         /**< Sent by the DM to undeploy a running VM */
-        UNDEPLOY_HARD,    /**< Sent by the DM to force undeploy a running VM */
-        POWEROFF,         /**< Sent by the DM to power off a running VM */
-        POWEROFF_HARD,    /**< Sent by the DM to power off hard a running VM */
-        RESTART,          /**< Sent by the DM to restart a deployed VM */
-        DELETE,           /**< Sent by the DM to delete a VM */
-        DELETE_RECREATE,  /**< Sent by the DM to cleanup a VM for resubmission*/
-        UPDATESG,         /**< Sent by RM/VMM to trigger the secgroup update*/
-        DISK_LOCK_SUCCESS,  /**< Sent by IM, image moves from locked to ready */
-        DISK_LOCK_FAILURE,  /**< Sent by IM, image moves from locked to error */
-        DISK_RESIZE_SUCCESS,/**< Sent by TM/VMM when a disk resize succeeds */
-        DISK_RESIZE_FAILURE,/**< Sent by TM/VMM when a disk resize fails */
-        UPDATE_CONF_SUCCESS,/**< Sent by TM/VMM when a update conf succeeds */
-        UPDATE_CONF_FAILURE /**< Sent by TM/VMM when a update conf fails */
-    };
-
-    LCMAction(Actions a, int v, int u, int g, int r):
-        ActionRequest(ActionRequest::USER), _action(a), _vm_id(v), _uid(u),
-        _gid(g), _req_id(r){}
-
-    LCMAction(const LCMAction& o):ActionRequest(o._type), _action(o._action),
-        _vm_id(o._vm_id), _uid(o._uid), _gid(o._gid), _req_id(o._req_id){}
-
-    Actions action() const
-    {
-        return _action;
-    }
-
-    int vm_id() const
-    {
-        return _vm_id;
-    }
-
-    int uid() const
-    {
-        return _uid;
-    }
-
-    int gid() const
-    {
-        return _gid;
-    }
-
-    int req_id() const
-    {
-        return _req_id;
-    }
-
-    ActionRequest * clone() const
-    {
-        return new LCMAction(*this);
-    }
-
-private:
-    Actions _action;
-
-    int _vm_id;
-
-    int _uid;
-    int _gid;
-
-    int _req_id;
-};
-
 /**
  *  The Virtual Machine Life-cycle Manager module. This class is responsible for
  *  managing the life-cycle of a Virtual Machine.
  */
-class LifeCycleManager : public ActionListener
+class LifeCycleManager : public Listener
 {
 public:
-    LifeCycleManager():
-        vmpool(0), hpool(0), ipool(0), sgpool(0), clpool(0), tm(0), vmm(0),
-        dm(0), imagem(0)
+    LifeCycleManager()
+        : Listener("Life Cycle Manager")
     {
-        am.addListener(this);
     };

     ~LifeCycleManager() = default;

-    /**
-     *  Triggers specific actions to the Life-cycle Manager. This function
-     *  wraps the ActionManager trigger function.
-     *    @param action the LCM action
-     *    @param vid VM unique id. This is the argument of the passed to the
-     *    invoked action.
-     *    @param r RM request attributes to copy to the action request: uid,
-     *    gid and request_id.
-     */
-    void trigger(LCMAction::Actions action, int id, const RequestAttributes& r);
-
-    void trigger(LCMAction::Actions action, int id);
-
-    void finalize()
-    {
-        am.finalize();
-    }
-
     /**
      *  This functions starts a new thread for the Life-cycle Manager. This
      *  thread will wait in an action loop till it receives ACTION_FINALIZE.

@@ -203,15 +64,6 @@ public:
      */
     void init_managers();

-    /**
-     *  Gets the thread identification.
-     *    @return pthread_t for the manager thread (that in the action loop).
-     */
-    pthread_t get_thread_id() const
-    {
-        return lcm_thread;
-    };
-
     /**
      *  Recovers a VM by self-triggering the associated lost transition.
      *    @param vm to be recovered

@@ -225,77 +77,157 @@ public:
      */
     void retry(VirtualMachine * vm);

-private:
-    /**
-     *  Thread id for the Virtual Machine Manager
-     */
-    pthread_t lcm_thread;
-
+    // -------------------------------------------------------------------------
+    // Internal Actions, triggered by OpenNebula components & drivers
+    // -------------------------------------------------------------------------
+    void start_prolog_migrate(VirtualMachine* vm);
+
+    void revert_migrate_after_failure(VirtualMachine* vm);
+
+    void trigger_save_success(int vid);
+    void trigger_save_failure(int vid);
+
+    void trigger_deploy_success(int vid);
+    void trigger_deploy_failure(int vid);
+
+    void trigger_shutdown_success(int vid);
+    void trigger_shutdown_failure(int vid);
+
+    void trigger_monitor_suspend(int vid);
+    void trigger_monitor_done(int vid);
+    void trigger_monitor_poweroff(int vid);
+    void trigger_monitor_poweron(int vid);
+
+    void trigger_prolog_success(int vid);
+    void trigger_prolog_failure(int vid);
+
+    void trigger_epilog_success(int vid);
+    void trigger_epilog_failure(int vid);
+
+    void trigger_attach_success(int vid);
+    void trigger_attach_failure(int vid);
+
+    void trigger_detach_success(int vid);
+    void trigger_detach_failure(int vid);
+
+    void trigger_saveas_success(int vid);
+    void trigger_saveas_failure(int vid);
+
+    void trigger_attach_nic_success(int vid);
+    void trigger_attach_nic_failure(int vid);
+
+    void trigger_detach_nic_success(int vid);
+    void trigger_detach_nic_failure(int vid);
+
+    void trigger_cleanup_callback(int vid);
+
+    void trigger_snapshot_create_success(int vid);
+    void trigger_snapshot_create_failure(int vid);
+
+    void trigger_snapshot_revert_success(int vid);
+    void trigger_snapshot_revert_failure(int vid);
+
+    void trigger_snapshot_delete_success(int vid);
+    void trigger_snapshot_delete_failure(int vid);
+
+    void trigger_disk_snapshot_success(int vid);
+    void trigger_disk_snapshot_failure(int vid);
+
+    void trigger_disk_lock_success(int vid);
+    void trigger_disk_lock_failure(int vid);
+
+    void trigger_disk_resize_success(int vid);
+    void trigger_disk_resize_failure(int vid);
+
+    void trigger_update_conf_success(int vid);
+    void trigger_update_conf_failure(int vid);
+
+    // -------------------------------------------------------------------------
+    // External Actions, triggered by user requests
+    // -------------------------------------------------------------------------
+    void trigger_deploy(int vid);
+    void trigger_suspend(int vid, const RequestAttributes& ra);
+    void trigger_restore(int vid, const RequestAttributes& ra);
+    void trigger_stop(int vid, const RequestAttributes& ra);
+    void trigger_checkpoint(int vid);
+    void trigger_migrate(int vid, const RequestAttributes& ra,
+        VMActions::Action vm_action);
+    void trigger_migrate(int vid, const RequestAttributes& ra)
+    {
+        trigger_migrate(vid, ra, VMActions::MIGRATE_ACTION);
+    }
+    void trigger_migrate_poweroff(int vid, const RequestAttributes& ra)
+    {
+        trigger_migrate(vid, ra, VMActions::POFF_MIGRATE_ACTION);
+    }
+    void trigger_migrate_poweroff_hard(int vid, const RequestAttributes& ra)
+    {
+        trigger_migrate(vid, ra, VMActions::POFF_HARD_MIGRATE_ACTION);
+    }
+    void trigger_live_migrate(int vid, const RequestAttributes& ra);
+    void trigger_shutdown(int vid, bool hard, const RequestAttributes& ra);
+    void trigger_undeploy(int vid, bool hard, const RequestAttributes& ra);
+    void trigger_undeploy(int vid, const RequestAttributes& ra)
+    {
+        trigger_undeploy(vid, false, ra);
+    }
+    void trigger_undeploy_hard(int vid, const RequestAttributes& ra)
+    {
+        trigger_undeploy(vid, true, ra);
+    }
+    void trigger_poweroff(int vid, const RequestAttributes& ra);
+    void trigger_poweroff_hard(int vid, const RequestAttributes& ra);
+    void trigger_poweroff(int vid, bool hard, const RequestAttributes& ra);
+    void trigger_updatesg(int vid);
+    void trigger_restart(int vid, const RequestAttributes& ra);
+    void trigger_delete(int vid, const RequestAttributes& ra);
+    void trigger_delete_recreate(int vid, const RequestAttributes& ra);
+
+private:
     /**
      *  Pointer to the Virtual Machine Pool, to access VMs
      */
-    VirtualMachinePool * vmpool;
+    VirtualMachinePool * vmpool = nullptr;

     /**
      *  Pointer to the Host Pool, to access hosts
      */
-    HostPool * hpool;
+    HostPool * hpool = nullptr;

     /**
      *  Pointer to the Image Pool, to access images
      */
-    ImagePool * ipool;
+    ImagePool * ipool = nullptr;

     /**
      *  Pointer to the SecurityGroup Pool
      */
-    SecurityGroupPool * sgpool;
+    SecurityGroupPool * sgpool = nullptr;

     /**
      *  Pointer to the Cluster Pool
      */
-    ClusterPool * clpool;
+    ClusterPool * clpool = nullptr;

     /**
      *  Pointer to TransferManager
      */
-    TransferManager * tm;
+    TransferManager * tm = nullptr;

     /**
      *  Pointer to VirtualMachineManager
      */
-    VirtualMachineManager * vmm;
+    VirtualMachineManager * vmm = nullptr;

     /**
      *  Pointer to DispatchManager
      */
-    DispatchManager * dm;
-
-    /**
-     *  Action engine for the Manager
-     */
-    ActionManager am;
+    DispatchManager * dm = nullptr;

     /**
      *  Pointer to ImageManager
      */
-    ImageManager * imagem;
-
-    /**
-     *  Function to execute the Manager action loop method within a new pthread
-     *  (requires C linkage)
-     */
-    friend void * lcm_action_loop(void *arg);
-
-    // -------------------------------------------------------------------------
-    // Action Listener interface
-    // -------------------------------------------------------------------------
-    void finalize_action(const ActionRequest& ar)
-    {
-        NebulaLog::log("LCM",Log::INFO,"Stopping Life-cycle Manager...");
-    };
-
-    void user_action(const ActionRequest& ar);
+    ImageManager * imagem = nullptr;

     /**
      *  Cleans up a VM, canceling any pending or ongoing action and closing

@@ -307,107 +239,7 @@ private:
      *    image may need to be set to error state.
      */
     void clean_up_vm(VirtualMachine *vm, bool dispose, int& image_id,
-            const LCMAction& la);
+            int uid, int gid, int req_id);
-
-    // -------------------------------------------------------------------------
-    // Internal Actions, triggered by OpenNebula components & drivers
-    // -------------------------------------------------------------------------
-    void start_prolog_migrate(VirtualMachine* vm);
-
-    void revert_migrate_after_failure(VirtualMachine* vm);
-
-    void save_success_action(int vid);
-    void save_failure_action(int vid);
-
-    void deploy_success_action(int vid);
-    void deploy_failure_action(int vid);
-
-    void shutdown_success_action(int vid);
-    void shutdown_failure_action(int vid);
-
-    void monitor_suspend_action(int vid);
-    void monitor_done_action(int vid);
-    void monitor_poweroff_action(int vid);
-    void monitor_poweron_action(int vid);
-
-    void prolog_success_action(int vid);
-    void prolog_failure_action(int vid);
-
-    void epilog_success_action(int vid);
-    void epilog_failure_action(int vid);
-
-    void attach_success_action(int vid);
-    void attach_failure_action(int vid);
-
-    void detach_success_action(int vid);
-    void detach_failure_action(int vid);
-
-    void saveas_success_action(int vid);
-    void saveas_failure_action(int vid);
-
-    void attach_nic_success_action(int vid);
-    void attach_nic_failure_action(int vid);
-
-    void detach_nic_success_action(int vid);
-    void detach_nic_failure_action(int vid);
-
-    void cleanup_callback_action(int vid);
-
-    void snapshot_create_success(int vid);
-    void snapshot_create_failure(int vid);
-
-    void snapshot_revert_success(int vid);
-    void snapshot_revert_failure(int vid);
-
-    void snapshot_delete_success(int vid);
-    void snapshot_delete_failure(int vid);
-
-    void disk_snapshot_success(int vid);
-    void disk_snapshot_failure(int vid);
-
-    void disk_lock_success(int vid);
-    void disk_lock_failure(int vid);
-
-    void disk_resize_success(int vid);
-    void disk_resize_failure(int vid);
-
-    void update_conf_success(int vid);
-    void update_conf_failure(int vid);
-
-    // -------------------------------------------------------------------------
-    // External Actions, triggered by user requests
-    // -------------------------------------------------------------------------
-    void deploy_action(const LCMAction& la);
-
-    void suspend_action(const LCMAction& la);
-
-    void restore_action(const LCMAction& la);
-
-    void stop_action(const LCMAction& la);
-
-    void checkpoint_action(const LCMAction& la);
-
-    void migrate_action(const LCMAction& la);
-
-    void live_migrate_action(const LCMAction& la);
-
-    void shutdown_action(const LCMAction& la, bool hard);
-
-    void undeploy_action(const LCMAction& la, bool hard);
-
-    void poweroff_action(const LCMAction& la);
-
-    void poweroff_hard_action(const LCMAction& la);
-
-    void poweroff_action(int vid, bool hard, const LCMAction& la);
-
-    void updatesg_action(const LCMAction& la);
-
-    void restart_action(const LCMAction& la);
-
-    void delete_action(const LCMAction& la);
-
-    void delete_recreate_action(const LCMAction& la);
 };

 #endif /*LIFE_CYCLE_MANAGER_H_*/
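A hedged sketch of the calling pattern behind the per-action trigger_* methods declared above: each public method can simply queue a lambda on the Listener event loop (the Listener base class is added by this commit, shown in full below), so all state transitions run serialized on one thread. The ToyLCM class, its method body and the log output are illustrative only, not the actual LifeCycleManager implementation; the sketch also assumes NebulaLog has been initialized, as it is inside oned.

    #include <iostream>

    #include "Listener.h"   // added by this commit

    class ToyLCM : public Listener
    {
    public:
        ToyLCM() : Listener("Toy LCM") {}

        void init() { start(); }            // Listener::start() launches the loop thread

        void trigger_deploy_success(int vid)
        {
            trigger([this, vid] {           // runs later on the listener thread
                std::cout << "deploy success for VM " << vid << std::endl;
            });
        }
    };

    int main()
    {
        ToyLCM lcm;

        lcm.init();
        lcm.trigger_deploy_success(42);
        lcm.finalize();     // queues the stop event
        lcm.join_thread();  // wait for the loop thread to end
        return 0;
    }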
include/Listener.h (new file, 201 lines)
@@ -0,0 +1,201 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                */
/*                                                                            */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may    */
/* not use this file except in compliance with the License. You may obtain    */
/* a copy of the License at                                                   */
/*                                                                            */
/* http://www.apache.org/licenses/LICENSE-2.0                                 */
/*                                                                            */
/* Unless required by applicable law or agreed to in writing, software        */
/* distributed under the License is distributed on an "AS IS" BASIS,          */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   */
/* See the License for the specific language governing permissions and        */
/* limitations under the License.                                             */
/* -------------------------------------------------------------------------- */

#ifndef LISTENER_H_
#define LISTENER_H_

#include <functional>
#include <queue>
#include <atomic>
#include <mutex>
#include <chrono>
#include <condition_variable>
#include <thread>

#include "NebulaLog.h"

/**
 *  The Timer class executes a given action periodically in a separate thread.
 *  The thread is terminated when the object is deleted
 */
class Timer
{
public:
    Timer(double s, std::function<void()> timer)
    {
        end = false;

        timer_thread = std::thread([&, s, timer]{

            std::unique_lock<std::mutex> ul(lock);

            while (true)
            {
                bool tout = cond.wait_for(ul, std::chrono::duration<double>(s), [&]{
                    return end == true;
                });

                if (end)
                {
                    return;
                }
                else if (!tout)
                {
                    timer();
                }
            }
        });
    };

    ~Timer()
    {
        stop();

        timer_thread.join();
    }

    void stop()
    {
        std::unique_lock<std::mutex> ul(lock);

        end = true;

        cond.notify_one();
    }
private:
    std::atomic<bool> end;

    std::thread timer_thread;

    std::mutex lock;
    std::condition_variable cond;
};

/**
 *  This class implements basic functionality to listen for events. Events are
 *  triggered in separate threads. The class store them in a queue and executed
 *  them in the listner thread.
 */
class Listener
{
public:
    Listener(std::string _name)
        : name(_name)
    {
    }

    virtual ~Listener()
    {
        join_thread();
    }

    /**
     *  Trigger an event in the listner. For example:
     *    listener.trigger(std::bind(&Class::callback, this, param1, param2);
     *
     *    @param f, callback function for the event
     */
    void trigger(std::function<void()> f)
    {
        std::unique_lock<std::mutex> ul(lock);

        pending.push(f);

        ul.unlock();

        cond.notify_one();
    }

    /**
     *  Async stops the event loop
     */
    void finalize()
    {
        trigger([&] {
            NebulaLog::info("Lis", "Stopping " + name);

            finalize_action();

            end = true;

            cond.notify_one();
        });
    }

    void join_thread()
    {
        if (loop_thread.joinable())
        {
            loop_thread.join();
        }
    }

protected:
    /**
     *  Async starts the event loop waiting for events.
     */
    void start()
    {
        end = false;

        loop_thread = std::thread([&] {
            NebulaLog::info("Lis", name + " started.");

            loop();

            NebulaLog::info("Lis", name + " stopped.");
        });
    }

    void loop()
    {
        std::unique_lock<std::mutex> ul(lock);

        while (true)
        {
            cond.wait(ul, [&]{return (end || !pending.empty());});

            if (end)
            {
                break;
            }

            auto fn = pending.front();
            pending.pop();

            fn();
        }
    }

    /**
     *  Action called on finalize action
     */
    virtual void finalize_action() {};

private:
    std::string name;

    std::thread loop_thread;

    std::atomic<bool> end;

    std::mutex lock;
    std::condition_variable cond;

    std::queue<std::function<void()>> pending;
};

#endif /*LISTENER_H_*/
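The trigger() documentation above passes a member function through std::bind; a capturing lambda is equivalent. Both forms are shown here for a hypothetical Handler class with a callback(int, int) method; only trigger() itself comes from the Listener class above.

    #include <functional>

    #include "Listener.h"

    class Handler : public Listener
    {
    public:
        Handler() : Listener("Handler") {}

        void callback(int a, int b) { /* handle the event */ }

        void queue_event(int a, int b)
        {
            trigger(std::bind(&Handler::callback, this, a, b));  // as in the doc comment
            trigger([this, a, b]{ callback(a, b); });            // equivalent lambda
        }
    };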
include/MarketPlaceManager.h
@@ -19,10 +19,7 @@

 #include "ProtocolMessages.h"
 #include "DriverManager.h"
-#include "ActionManager.h"
-#include "NebulaLog.h"
-
-extern "C" void * marketplace_action_loop(void *arg);
+#include "Listener.h"

 class MarketPlacePool;
 class MarketPlaceAppPool;

@@ -32,9 +29,7 @@ class DatastorePool;
 class ImageManager;
 class RaftManager;

-class MarketPlaceManager :
-    public DriverManager<Driver<market_msg_t>>,
-    public ActionListener
+class MarketPlaceManager : public DriverManager<Driver<market_msg_t>>
 {
 public:

@@ -46,7 +41,7 @@ public:
      */
     MarketPlaceManager(time_t t, time_t m, const std::string& _mad_location);

-    ~MarketPlaceManager(){};
+    ~MarketPlaceManager() = default;

     /**
      *  Initializes internal pointers to other managers. Must be called when

@@ -55,36 +50,27 @@ public:
     void init_managers();

     /**
-     *  This functions starts the associated listener thread, and creates a
-     *  new thread for the MarketPlace Manager. This thread will wait in
-     *  an action loop till it receives ACTION_FINALIZE.
+     *  This functions starts the associated timer thread and drivers.
      *    @return 0 on success.
      */
     int start();

+    /**
+     *  Stops timer and drivers
+     */
+    void finalize()
+    {
+        timer_thread.stop();
+
+        DriverManager::stop(drivers_timeout);
+    };
+
     /**
      *  Loads the MarketPlace Driver defined in configuration file
      *    @param _mads configuration of drivers
      */
     int load_drivers(const std::vector<const VectorAttribute*>& _mads);

-    /**
-     *  Gets the thread identification.
-     *    @return pthread_t for the manager thread (that in the action loop).
-     */
-    pthread_t get_thread_id() const
-    {
-        return marketm_thread;
-    };
-
-    /**
-     *  Finalizes the Image Manager
-     */
-    void finalize()
-    {
-        am.finalize();
-    };
-
     /**
      *  Imports a new app into the marketplace. The marketplace app needs to
      *  include the ORIGIN_ID attribute so the driver can locate the app. An

@@ -137,9 +123,9 @@ private:
     static const char * market_driver_name;

     /**
-     *  Thread id for the MarketPlace Manager
+     *  Timer action async execution
      */
-    pthread_t marketm_thread;
+    Timer timer_thread;

     /**
      *  Timer period for the Image Manager.

@@ -154,37 +140,32 @@ private:
     /**
      *  Pointer to the marketplace pool
      */
-    MarketPlacePool * mppool;
+    MarketPlacePool * mppool = nullptr;

     /**
      *  Pointer to the app pool
      */
-    MarketPlaceAppPool * apppool;
+    MarketPlaceAppPool * apppool = nullptr;

     /**
      *  Pointer to the image pool
      */
-    ImagePool * ipool;
+    ImagePool * ipool = nullptr;

     /**
      *  Pointer to the image pool
      */
-    DatastorePool * dspool;
+    DatastorePool * dspool = nullptr;

     /**
      *  Pointer to the Image Manger
      */
-    ImageManager * imagem;
+    ImageManager * imagem = nullptr;

     /**
      *  Pointer to the Raft Manger
      */
-    RaftManager * raftm;
+    RaftManager * raftm = nullptr;

-    /**
-     *  Action engine for the Manager
-     */
-    ActionManager am;
-
     /**
      *  Returns a pointer to the marketplace driver.

@@ -193,13 +174,7 @@ private:
     const Driver<market_msg_t> * get() const
     {
         return DriverManager::get_driver(market_driver_name);
-    };
+    }

-    /**
-     *  Function to execute the Manager action loop method within a new pthread
-     *  (requires C linkage)
-     */
-    friend void * marketplace_action_loop(void *arg);
-
     /**
      *  Formats an XML message for the MAD

@@ -230,15 +205,9 @@ private:
     /**
      *  This function is executed periodically to monitor marketplaces..
      */
-    void timer_action(const ActionRequest& ar);
+    void timer_action();

     static const int drivers_timeout = 10;

-    void finalize_action(const ActionRequest& ar)
-    {
-        NebulaLog::log("MKP", Log::INFO, "Stopping Marketplace Manager...");
-        DriverManager::stop(drivers_timeout);
-    };
 };

 #endif /*MARKETPLACE_MANAGER_H*/
@@ -22,6 +22,7 @@

 #include "DefaultQuotas.h"
 #include "UserPool.h"
+#include "NebulaLog.h"

 class LogDB;
 class FedLogDB;
include/RaftManager.h
@@ -17,22 +17,18 @@
 #ifndef RAFT_MANAGER_H_
 #define RAFT_MANAGER_H_

-#include "ActionManager.h"
+#include "Listener.h"
 #include "ReplicaManager.h"
 #include "ReplicaRequest.h"
 #include "Template.h"
 #include "ExecuteHook.h"

-extern "C" void * raft_manager_loop(void *arg);
-
-extern "C" void * reconciling_thread(void *arg);
-
 class LogDBRecord;

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-class RaftManager : public ActionListener
+class RaftManager
 {
 public:
     /**

@@ -64,8 +60,6 @@ public:
     {
         delete leader_hook;
         delete follower_hook;
-
-        pthread_mutex_destroy(&mutex);
     };

     // -------------------------------------------------------------------------

@@ -103,22 +97,9 @@ public:
     }

     /**
-     *  Finalizes the Raft Consensus Manager
+     *  Termination function
      */
-    void finalize()
-    {
-        am.finalize();
-    }
-
-    /**
-     *  Starts the Raft Consensus Manager
-     */
-    int start();
-
-    pthread_t get_thread_id() const
-    {
-        return raft_thread;
-    };
+    void finalize();

     // -------------------------------------------------------------------------
     // Raft state query functions

@@ -158,41 +139,23 @@ public:

     State get_state()
     {
-        State _state;
-
-        pthread_mutex_lock(&mutex);
-
-        _state = state;
-
-        pthread_mutex_unlock(&mutex);
-
-        return _state;
+        std::lock_guard<std::mutex> lock(raft_mutex);
+
+        return state;
     }

     unsigned int get_term()
     {
-        unsigned int _term;
-
-        pthread_mutex_lock(&mutex);
-
-        _term = term;
-
-        pthread_mutex_unlock(&mutex);
-
-        return _term;
+        std::lock_guard<std::mutex> lock(raft_mutex);
+
+        return term;
     }

     uint64_t get_commit()
     {
-        uint64_t _commit;
-
-        pthread_mutex_lock(&mutex);
-
-        _commit = commit;
-
-        pthread_mutex_unlock(&mutex);
-
-        return _commit;
+        std::lock_guard<std::mutex> lock(raft_mutex);
+
+        return commit;
     }

     /**

@@ -247,12 +210,10 @@ public:
     {
         bool _reconciling;

-        pthread_mutex_lock(&mutex);
+        std::lock_guard<std::mutex> lock(raft_mutex);

         _reconciling = reconciling;

-        pthread_mutex_unlock(&mutex);
-
         return _reconciling;
     }

@@ -266,7 +227,7 @@ public:
         std::map<int, uint64_t>::iterator it;
         uint64_t _index = UINT64_MAX;

-        pthread_mutex_lock(&mutex);
+        std::lock_guard<std::mutex> lock(raft_mutex);

         it = next.find(follower_id);

@@ -275,8 +236,6 @@ public:
             _index = it->second;
         }

-        pthread_mutex_unlock(&mutex);
-
         return _index;
     }

@@ -341,21 +300,7 @@ public:
     void reset_index(int follower_id);

 private:
-    friend void * raft_manager_loop(void *arg);
-
-    friend void * reconciling_thread(void *arg);
-
-    /**
-     *  Thread id of the main event loop
-     */
-    pthread_t raft_thread;
-
-    pthread_mutex_t mutex;
-
-    /**
-     *  Event engine for the RaftManager
-     */
-    ActionManager am;
+    std::mutex raft_mutex;

     /**
      *  Clients waiting for a log replication

@@ -435,6 +380,11 @@ private:

     struct timespec broadcast_timeout;

+    /**
+     *  Timer action async execution
+     */
+    Timer timer_thread;
+
     //--------------------------------------------------------------------------
     // Volatile log index variables
     //  - commit, highest log known to be committed

@@ -466,17 +416,12 @@ private:
     ExecuteHook * follower_hook;

     // -------------------------------------------------------------------------
-    // Action Listener interface
+    // Internal Raft functions
     // -------------------------------------------------------------------------
-    /**
-     *  Termination function
-     */
-    void finalize_action(const ActionRequest& ar);
-
     /**
      *  This function is executed periodically to purge the state log
      */
-    void timer_action(const ActionRequest& ar);
+    void timer_action();

     /**
      *    @param s the state to check

@@ -486,18 +431,13 @@ private:
     {
         bool _is_state;

-        pthread_mutex_lock(&mutex);
+        std::lock_guard<std::mutex> lock(raft_mutex);

         _is_state = state == s;

-        pthread_mutex_unlock(&mutex);
-
         return _is_state;
     }

-    // -------------------------------------------------------------------------
-    // Internal Raft functions
-    // -------------------------------------------------------------------------
     /**
      *  Request votes of followers
      */
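A short illustration (general C++, not project code) of why the getters above switch to std::lock_guard: the mutex is released automatically when the guard goes out of scope, on every return path, so the explicit pthread unlock calls and the temporary copy variables can be dropped.

    #include <iostream>
    #include <mutex>

    static std::mutex m;
    static int counter = 42;

    int get_counter()
    {
        std::lock_guard<std::mutex> lock(m);   // locks m here

        return counter;                        // m is unlocked when 'lock' is destroyed
    }

    int main()
    {
        std::cout << get_counter() << std::endl;
        return 0;
    }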
include/RequestManager.h
@@ -17,8 +17,6 @@
 #ifndef REQUEST_MANAGER_H_
 #define REQUEST_MANAGER_H_

-#include "ActionManager.h"
-
 #include <xmlrpc-c/base.hpp>
 #include <xmlrpc-c/registry.hpp>
 #include <xmlrpc-c/server_abyss.hpp>

@@ -26,11 +24,9 @@
 #include <set>

-extern "C" void * rm_action_loop(void *arg);
-
 extern "C" void * rm_xml_server_loop(void *arg);

-class RequestManager : public ActionListener
+class RequestManager
 {
 public:

@@ -46,7 +42,7 @@ public:
         const std::string& _listen_address,
         int message_size);

-    ~RequestManager(){};
+    ~RequestManager() = default;

     /**
      *  This functions starts the associated listener thread (XML server), and

@@ -56,22 +52,7 @@ public:
      */
     int start();

-    /**
-     *  Gets the thread identification.
-     *    @return pthread_t for the manager thread (that in the action loop).
-     */
-    pthread_t get_thread_id() const
-    {
-        return rm_thread;
-    };
-
-    /**
-     *  Stops the main RM thread.
-     */
-    void finalize()
-    {
-        am.finalize();
-    };
+    void finalize();

     /**
      *  @return an AbyssServer to run xmlrpc connections

@@ -108,13 +89,6 @@ private:

     friend void * rm_xml_server_loop(void *arg);

-    friend void * rm_action_loop(void *arg);
-
-    /**
-     *  Thread id for the RequestManager
-     */
-    pthread_t rm_thread;
-
     /**
      *  Thread id for the XML Server
      */

@@ -165,11 +139,6 @@ private:
      */
     std::string listen_address;

-    /**
-     *  Action engine for the Manager
-     */
-    ActionManager am;
-
     /**
      *  To register XML-RPC methods
      */

@@ -181,11 +150,6 @@ private:
     void register_xml_methods();

     int setup_socket();

-    // ------------------------------------------------------------------------
-    // ActioListener Interface
-    // ------------------------------------------------------------------------
-    void finalize_action(const ActionRequest& ar) override;
 };

 #endif
include/SyncRequest.h
@@ -19,26 +19,26 @@

 #include <time.h>

-#include "ActionManager.h"
+#include "Listener.h"

 /**
  *  Base class to implement synchronous operation in the MadManagers. This class
  *  cannot be directly instantiated.
  */
-class SyncRequest: public ActionListener
+class SyncRequest: public Listener
 {
 public:
     SyncRequest():
+        Listener(""),
         result(false),
         message(""),
         timeout(false),
         id(-1),
         time_out(0)
     {
-        am.addListener(this);
-    };
+    }

-    virtual ~SyncRequest(){};
+    virtual ~SyncRequest() = default;

     /**
      *  The result of the request, true if the operation succeeded

@@ -65,8 +65,8 @@ public:
      */
     void notify()
     {
-        am.finalize();
-    };
+        finalize();
+    }

     /**
      *  Wait for the AuthRequest to be completed

@@ -75,40 +75,13 @@ public:
     {
         time_out = time(0) + 90;//Requests will expire in 1.5 minutes

-        am.loop();
-    };
-
-    /**
-     *  Wait for the AuthRequest to be completed
-     */
-    void wait(time_t t)
-    {
-        am.loop(t);
-    };
+        loop();
+    }

     /**
      *  Time in seconds when this request will expire
      */
     time_t time_out;

-protected:
-
-    /**
-     *  The ActionManager that will be notify when the request is ready.
-     */
-    ActionManager am;
-
-    /**
-     *  Timer action to finalize time-out waits
-     */
-    void timer_action(const ActionRequest& ar)
-    {
-        result  = false;
-        timeout = true;
-        message = "Operation time out";
-
-        am.finalize();
-    };
 };

 #endif /*SYNC_REQUEST_H_*/
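A hedged sketch of the blocking handshake SyncRequest now builds on: one thread blocks until another thread calls finalize(). SyncRequest::wait() above runs Listener::loop() directly on the calling thread; this self-contained sketch uses start()/join_thread() instead, and WaitExample, its method names and the worker thread are illustrative only. It also assumes NebulaLog has been initialized, as it is inside oned.

    #include <thread>

    #include "Listener.h"   // added by this commit

    class WaitExample : public Listener
    {
    public:
        WaitExample() : Listener("Wait example") {}

        void wait()   { start(); join_thread(); }  // block until the stop event is processed
        void notify() { finalize(); }              // queue the stop event
    };

    int main()
    {
        WaitExample req;

        std::thread worker([&req]{ req.notify(); });  // e.g. a driver callback completing

        req.wait();
        worker.join();
        return 0;
    }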
include/TransferManager.h
@@ -19,74 +19,20 @@

 #include "ProtocolMessages.h"
 #include "DriverManager.h"
-#include "ActionManager.h"
-
-extern "C" void * tm_action_loop(void *arg);
+#include "Listener.h"

 class HostPool;
 class VirtualMachine;
 class VirtualMachineDisk;
 class VirtualMachinePool;
+class LifeCycleManager;

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-class TMAction : public ActionRequest
-{
-public:
-    enum Actions
-    {
-        PROLOG,
-        PROLOG_MIGR,
-        PROLOG_RESUME,
-        PROLOG_ATTACH,
-        EPILOG,
-        EPILOG_LOCAL,
-        EPILOG_STOP,
-        EPILOG_DELETE,
-        EPILOG_DELETE_PREVIOUS,
-        EPILOG_DELETE_STOP,
-        EPILOG_DELETE_BOTH,
-        EPILOG_DETACH,
-        CHECKPOINT,
-        DRIVER_CANCEL,
-        SAVEAS_HOT,
-        SNAPSHOT_CREATE,
-        SNAPSHOT_REVERT,
-        SNAPSHOT_DELETE,
-        RESIZE
-    };
-
-    TMAction(Actions a, int v):ActionRequest(ActionRequest::USER),
-        _action(a), _vm_id(v){}
-
-    TMAction(const TMAction& o):ActionRequest(o._type), _action(o._action),
-        _vm_id(o._vm_id){}
-
-    Actions action() const
-    {
-        return _action;
-    }
-
-    int vm_id() const
-    {
-        return _vm_id;
-    }
-
-    ActionRequest * clone() const
-    {
-        return new TMAction(*this);
-    }
-
-private:
-    Actions _action;
-
-    int _vm_id;
-};
-
 class TransferManager :
     public DriverManager<Driver<transfer_msg_t>>,
-    public ActionListener
+    public Listener
 {
 public:

@@ -95,33 +41,14 @@ public:
         HostPool * _hpool,
         const std::string& _mad_location):
             DriverManager(_mad_location),
+            Listener("Transfer Manager"),
             vmpool(_vmpool),
             hpool(_hpool)
     {
-        am.addListener(this);
     };

     ~TransferManager() = default;

-    /**
-     *  Triggers specific actions to the Information Manager. This function
-     *  wraps the ActionManager trigger function.
-     *    @param action the IM action
-     *    @param vid VM unique id. This is the argument of the passed to the
-     *    invoked action.
-     */
-    void trigger(TMAction::Actions action, int vid)
-    {
-        TMAction tm_ar(action, vid);
-
-        am.trigger(tm_ar);
-    }
-
-    void finalize()
-    {
-        am.finalize();
-    }
-
     /**
      *  This functions starts the associated listener thread, and creates a
      *  new thread for the Information Manager. This thread will wait in

@@ -136,15 +63,6 @@ public:
      */
     int load_drivers(const std::vector<const VectorAttribute*>& _mads);

-    /**
-     *  Gets the thread identification.
-     *    @return pthread_t for the manager thread (that in the action loop).
-     */
-    pthread_t get_thread_id() const
-    {
-        return tm_thread;
-    };
-
     /**
      *  Inserts a transfer command in the xfs stream
      *

@@ -245,11 +163,6 @@ public:
             const VirtualMachineDisk * disk,
             std::ostream& xfr);
 private:
-    /**
-     *  Thread id for the Transfer Manager
-     */
-    pthread_t tm_thread;
-
     /**
      *  Pointer to the Virtual Machine Pool, to access VMs
      */

@@ -260,11 +173,6 @@ private:
      */
     HostPool * hpool;

-    /**
-     *  Action engine for the Manager
-     */
-    ActionManager am;
-
     /**
      *  Generic name for the TransferManager driver
      */

@@ -292,17 +200,13 @@ private:
         return DriverManager::get_driver(transfer_driver_name);
     };

-    /**
-     *  Function to execute the Manager action loop method within a new pthread
-     *  (requires C linkage)
-     */
-    friend void * tm_action_loop(void *arg);
-
     // -------------------------------------------------------------------------
     // Protocol implementation, procesing messages from driver
     // -------------------------------------------------------------------------
     static void _undefined(std::unique_ptr<transfer_msg_t> msg);

     void _transfer(std::unique_ptr<transfer_msg_t> msg);

     static void _log(std::unique_ptr<transfer_msg_t> msg);

     // -------------------------------------------------------------------------

@@ -310,52 +214,49 @@ private:
     // -------------------------------------------------------------------------
     static const int drivers_timeout = 10;

-    void finalize_action(const ActionRequest& ar)
+    void finalize_action()
     {
-        NebulaLog::log("TM",Log::INFO,"Stopping Transfer Manager...");
-
         DriverManager::stop(drivers_timeout);
     };

-    void user_action(const ActionRequest& ar);
+public:

     /**
      *  This function starts the prolog sequence
      */
-    void prolog_action(int vid);
+    void trigger_prolog(VirtualMachine * vm);

     /**
      *  This function starts the prolog migration sequence
      */
-    void prolog_migr_action(int vid);
+    void trigger_prolog_migr(VirtualMachine * vm);

     /**
      *  This function starts the prolog resume sequence
      */
-    void prolog_resume_action(int vid);
+    void trigger_prolog_resume(VirtualMachine * vm);

     /**
      *  This function starts the prolog attach sequence
      */
-    void prolog_attach_action(int vid);
+    void trigger_prolog_attach(VirtualMachine * vm);

     /**
      *  This function starts the epilog sequence
      */
-    void epilog_action(bool local, int vid);
+    void trigger_epilog(bool local, VirtualMachine * vm);

     /**
      *  This function starts the epilog_stop sequence
      */
-    void epilog_stop_action(int vid);
+    void trigger_epilog_stop(VirtualMachine * vm);

     /**
      *  This function starts the epilog_delete sequence in the current host
      *    @param vid the Virtual Machine ID
      */
-    void epilog_delete_action(int vid)
+    void trigger_epilog_delete(VirtualMachine * vm)
     {
-        epilog_delete_action(false, vid);
+        trigger_epilog_delete(false, vm);
     }

     /**

@@ -363,48 +264,48 @@ private:
      *  i.e. the front-end (the VM is not running)
      *    @param vid the Virtual Machine ID
      */
-    void epilog_delete_stop_action(int vid)
+    void trigger_epilog_delete_stop(VirtualMachine * vm)
     {
-        epilog_delete_action(true, vid);
+        trigger_epilog_delete(true, vm);
     }
|
|
||||||
/**
|
/**
|
||||||
* This function starts the epilog_delete sequence on the previous host
|
* This function starts the epilog_delete sequence on the previous host
|
||||||
* @param vid the Virtual Machine ID
|
* @param vid the Virtual Machine ID
|
||||||
*/
|
*/
|
||||||
void epilog_delete_previous_action(int vid);
|
void trigger_epilog_delete_previous(VirtualMachine * vm);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function starts the epilog_delete sequence on the current and
|
* This function starts the epilog_delete sequence on the current and
|
||||||
* previous hosts
|
* previous hosts
|
||||||
* @param vid the Virtual Machine ID
|
* @param vid the Virtual Machine ID
|
||||||
*/
|
*/
|
||||||
void epilog_delete_both_action(int vid);
|
void trigger_epilog_delete_both(VirtualMachine * vm);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function starts the epilog_delete sequence
|
* This function starts the epilog_delete sequence
|
||||||
*/
|
*/
|
||||||
void epilog_delete_action(bool local, int vid);
|
void trigger_epilog_delete(bool local, VirtualMachine * vm);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function starts the epilog detach sequence
|
* This function starts the epilog detach sequence
|
||||||
*/
|
*/
|
||||||
void epilog_detach_action(int vid);
|
void trigger_epilog_detach(VirtualMachine * vm);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function starts the epilog sequence
|
* This function starts the epilog sequence
|
||||||
*/
|
*/
|
||||||
void checkpoint_action(int vid);
|
void trigger_checkpoint(int vid);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function cancels the operation being performed by the driver
|
* This function cancels the operation being performed by the driver
|
||||||
*/
|
*/
|
||||||
void driver_cancel_action(int vid);
|
void trigger_driver_cancel(int vid);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function starts the saveas of the given disk
|
* This function starts the saveas of the given disk
|
||||||
*/
|
*/
|
||||||
void saveas_hot_action(int vid);
|
void trigger_saveas_hot(int vid);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function performs a generic snapshot action
|
* This function performs a generic snapshot action
|
||||||
@ -414,22 +315,22 @@ private:
|
|||||||
/**
|
/**
|
||||||
* This function takes an snapshot of a disk
|
* This function takes an snapshot of a disk
|
||||||
*/
|
*/
|
||||||
void snapshot_create_action(int vid);
|
void trigger_snapshot_create(int vid);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function takes an snapshot of a disk
|
* This function takes an snapshot of a disk
|
||||||
*/
|
*/
|
||||||
void snapshot_revert_action(int vid);
|
void trigger_snapshot_revert(int vid);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function deletes an snapshot of a disk
|
* This function deletes an snapshot of a disk
|
||||||
*/
|
*/
|
||||||
void snapshot_delete_action(int vid);
|
void trigger_snapshot_delete(int vid);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function resizes a VM disk
|
* This function resizes a VM disk
|
||||||
*/
|
*/
|
||||||
void resize_action(int vid);
|
void trigger_resize(int vid);
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /*TRANSFER_MANAGER_H*/
|
#endif /*TRANSFER_MANAGER_H*/
|
||||||
|
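The TMAction request queue removed above is replaced by one public trigger_* method per action. A minimal, self-contained sketch of that pattern (illustrative only; MiniManager and its methods are not OpenNebula classes): each former enum value becomes a dedicated method that enqueues a callable, and a worker later drains the queue.

    // Sketch, assuming only "per-action methods backed by a shared work queue".
    #include <functional>
    #include <iostream>
    #include <queue>

    class MiniManager
    {
    public:
        // Each former "Actions" enum value becomes a dedicated method.
        void trigger_prolog(int vid) { enqueue([vid] { std::cout << "prolog " << vid << "\n"; }); }
        void trigger_epilog(int vid) { enqueue([vid] { std::cout << "epilog " << vid << "\n"; }); }

        // Drain the queue; in the real managers a listener thread does this.
        void run_pending()
        {
            while (!work.empty())
            {
                work.front()();
                work.pop();
            }
        }

    private:
        void enqueue(std::function<void()> f) { work.push(std::move(f)); }

        std::queue<std::function<void()>> work;
    };

    int main()
    {
        MiniManager m;

        m.trigger_prolog(42);   // was: build an action object and queue it by enum value
        m.trigger_epilog(42);

        m.run_pending();
    }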
@@ -19,106 +19,27 @@

 #include "VirtualMachineManagerDriver.h"
 #include "DriverManager.h"
-#include "ActionManager.h"
+#include "Listener.h"

 class DatastorePool;
 class HostPool;
 class VirtualMachinePool;

-extern "C" void * vmm_action_loop(void *arg);

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-class VMMAction : public ActionRequest
-{
-public:
-    enum Actions
-    {
-        DEPLOY,
-        SAVE,
-        SHUTDOWN,
-        CANCEL,
-        CANCEL_PREVIOUS,
-        CLEANUP,
-        CLEANUP_BOTH,
-        CLEANUP_PREVIOUS,
-        MIGRATE,
-        RESTORE,
-        REBOOT,
-        RESET,
-        DRIVER_CANCEL,
-        ATTACH,
-        DETACH,
-        ATTACH_NIC,
-        DETACH_NIC,
-        SNAPSHOT_CREATE,
-        SNAPSHOT_REVERT,
-        SNAPSHOT_DELETE,
-        DISK_SNAPSHOT_CREATE,
-        DISK_RESIZE,
-        UPDATE_CONF
-    };
-
-    VMMAction(Actions a, int v):ActionRequest(ActionRequest::USER),
-        _action(a), _vm_id(v){};
-
-    VMMAction(const VMMAction& o):ActionRequest(o._type), _action(o._action),
-        _vm_id(o._vm_id){};
-
-    Actions action() const
-    {
-        return _action;
-    }
-
-    int vm_id() const
-    {
-        return _vm_id;
-    }
-
-    ActionRequest * clone() const
-    {
-        return new VMMAction(*this);
-    }
-
-private:
-    Actions _action;
-
-    int _vm_id;
-};
-
 class VirtualMachineManager :
     public DriverManager<VirtualMachineManagerDriver>,
-    public ActionListener
+    public Listener
 {
 public:

     VirtualMachineManager(
-        time_t _timer_period,
         int    _vm_limit,
         const std::string& _mads);

     ~VirtualMachineManager() = default;

-    /**
-     *  Triggers specific actions to the Virtual Machine Manager. This function
-     *  wraps the ActionManager trigger function.
-     *    @param action the VMM action
-     *    @param vid VM unique id. This is the argument of the passed to the
-     *    invoked action.
-     */
-    void trigger(VMMAction::Actions action, int vid)
-    {
-        VMMAction vmm_ar(action, vid);
-
-        am.trigger(vmm_ar);
-    }
-
-    void finalize()
-    {
-        am.finalize();
-    }

     /**
      *  This functions starts the associated listener thread, and creates a
      *  new thread for the Virtual Machine Manager. This thread will wait in
@@ -127,15 +48,6 @@ public:
      */
     int start();

-    /**
-     *  Gets the thread identification.
-     *    @return pthread_t for the manager thread (that in the action loop).
-     */
-    pthread_t get_thread_id() const
-    {
-        return vmm_thread;
-    };
-
     /**
      *  Loads Virtual Machine Manager Mads defined in configuration file
      *    @param _mads configuration of drivers
@@ -223,11 +135,6 @@ public:
     int validate_raw(const Template * vmt, std::string& error_str);

 private:
-    /**
-     *  Thread id for the Virtual Machine Manager
-     */
-    pthread_t vmm_thread;
-
     /**
      *  Pointer to the Virtual Machine Pool, to access VMs
      */
@@ -243,27 +150,11 @@ private:
      */
     DatastorePool * ds_pool;

-    /**
-     *  Timer period for the Virtual Machine Manager.
-     */
-    time_t timer_period;
-
     /**
      *  Virtual Machine polling limit
      */
     int vm_limit;

-    /**
-     *  Action engine for the Manager
-     */
-    ActionManager am;
-
-    /**
-     *  Function to execute the Manager action loop method within a new pthread
-     *  (requires C linkage)
-     */
-    friend void * vmm_action_loop(void *arg);
-
     // -------------------------------------------------------------------------
     // Protocol implementation, procesing messages from driver
     // -------------------------------------------------------------------------
@@ -413,15 +304,11 @@ private:

     static const int drivers_timeout = 10;

-    void finalize_action(const ActionRequest& ar)
+    void finalize_action()
     {
-        NebulaLog::log("VMM",Log::INFO,"Stopping Virtual Machine Manager...");

         DriverManager::stop(drivers_timeout);
     };

-    void user_action(const ActionRequest& ar);

     /**
      *  Function to format a VMM Driver message in the form:
      *  <VMM_DRIVER_ACTION_DATA>
@@ -465,43 +352,39 @@ private:
         int ds_id,
         int sgid);

+public:
     /**
      *  Function executed when a DEPLOY action is received. It deploys a VM on
      *  a Host.
      *    @param vid the id of the VM to be deployed.
      */
-    void deploy_action(
-        int vid);
+    void trigger_deploy(int vid);

     /**
      *  Function to stop a running VM and generate a checkpoint file. This
      *  function is executed when a SAVE action is triggered.
      *    @param vid the id of the VM.
      */
-    void save_action(
-        int vid);
+    void trigger_save(int vid);

     /**
      *  Shutdowns a VM when a SHUTDOWN action is received.
      *    @param vid the id of the VM.
      */
-    void shutdown_action(
-        int vid);
+    void trigger_shutdown(int vid);

     /**
      *  Cancels a VM when a CANCEL action is received.
      *    @param vid the id of the VM.
      */
-    void cancel_action(
-        int vid);
+    void trigger_cancel(int vid);

     /**
      *  Cancels a VM (in the previous host) when a CANCEL action is received.
      *  Note that the domain-id is the last one returned by a boot action
      *    @param vid the id of the VM.
      */
-    void cancel_previous_action(
-        int vid);
+    void trigger_cancel_previous(int vid);

     /**
      *  Cleanups a host (cancel VM + delete disk images).
@@ -509,81 +392,70 @@ private:
      *    @param cancel_previous if true the VM will be canceled in the previous
      *    host (only relevant to delete VM's in MIGRATE state)
      */
-    void cleanup_action(
-        int vid, bool cancel_previous);
+    void trigger_cleanup(int vid, bool cancel_previous);

     /**
      *  Cleanups the previous host (cancel VM + delete disk images).
      *    @param vid the id of the VM.
      */
-    void cleanup_previous_action(
-        int vid);
+    void trigger_cleanup_previous(int vid);

     /**
      *  Function to migrate (live) a VM (MIGRATE action).
      *    @param vid the id of the VM.
      */
-    void migrate_action(
-        int vid);
+    void trigger_migrate(int vid);

     /**
      *  Restores a VM from a checkpoint file.
      *    @param vid the id of the VM.
      */
-    void restore_action(
-        int vid);
+    void trigger_restore(int vid);

     /**
      *  Reboots a running VM.
      *    @param vid the id of the VM.
      */
-    void reboot_action(
-        int vid);
+    void trigger_reboot(int vid);

     /**
      *  Resets a running VM.
      *    @param vid the id of the VM.
      */
-    void reset_action(
-        int vid);
+    void trigger_reset(int vid);

     /**
      *  Attaches a new disk to a VM. The VM must have a disk with the
      *  attribute ATTACH = YES
      *    @param vid the id of the VM.
      */
-    void attach_action(
-        int vid);
+    void trigger_attach(int vid);

     /**
      *  Detaches a disk from a VM. The VM must have a disk with the
      *  attribute ATTACH = YES
      *    @param vid the id of the VM.
      */
-    void detach_action(
-        int vid);
+    void trigger_detach(int vid);

     /**
      *  Attaches a new NIC to a VM. The VM must have a NIC with the
      *  attribute ATTACH = YES
      *    @param vid the id of the VM.
      */
-    void attach_nic_action(
-        int vid);
+    void trigger_attach_nic(int vid);

     /**
      *  Detaches a NIC from a VM. The VM must have a NIC with the
      *  attribute ATTACH = YES
      *    @param vid the id of the VM.
      */
-    void detach_nic_action(
-        int vid);
+    void trigger_detach_nic(int vid);

     /**
      *  This function cancels the current driver operation
      */
-    void driver_cancel_action(
-        int vid);
+    void trigger_driver_cancel(int vid);

     /**
      *  Creates a new system snapshot. The VM must have a snapshot with the
@@ -591,8 +463,7 @@ private:
      *
      *    @param vid the id of the VM.
      */
-    void snapshot_create_action(
-        int vid);
+    void trigger_snapshot_create(int vid);

     /**
      *  Reverts to a snapshot. The VM must have a snapshot with the
@@ -600,8 +471,7 @@ private:
      *
      *    @param vid the id of the VM.
      */
-    void snapshot_revert_action(
-        int vid);
+    void trigger_snapshot_revert(int vid);

     /**
      *  Deletes a snapshot. The VM must have a snapshot with the
@@ -609,32 +479,28 @@ private:
      *
      *    @param vid the id of the VM.
      */
-    void snapshot_delete_action(
-        int vid);
+    void trigger_snapshot_delete(int vid);

     /**
      *  Creates a new disk system snapshot.
      *
      *    @param vid the id of the VM.
      */
-    void disk_snapshot_create_action(
-        int vid);
+    void trigger_disk_snapshot_create(int vid);

     /**
      *  Resize a VM disk
      *
      *    @param vid the id of the VM.
      */
-    void disk_resize_action(
-        int vid);
+    void trigger_disk_resize(int vid);

     /**
      *  Update VM context
      *
      *    @param vid the id of the VM.
      */
-    void update_conf_action(
-        int vid);
+    void trigger_update_conf(int vid);
 };

 #endif /*VIRTUAL_MACHINE_MANAGER_H*/
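The deleted vmm_action_loop trampoline and the pthread_t bookkeeping are what Listener::start() and join_thread() now cover. A rough equivalent built on std::thread, assuming only start/finalize/join semantics (a sketch with illustrative names, not the Listener introduced by this commit):

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>

    class MiniListener
    {
    public:
        // Replaces: pthread_attr_init + pthread_create(&thr, ..., c_linkage_loop, this)
        void start()
        {
            done = false;
            loop_thread = std::thread([this] { loop(); });
        }

        // Replaces the old finalize()/FINALIZE action handshake.
        void finalize() { done = true; }

        // Replaces get_thread_id() + pthread_join in the caller.
        void join_thread()
        {
            if (loop_thread.joinable())
            {
                loop_thread.join();
            }
        }

    private:
        void loop()
        {
            while (!done)
            {
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
            }
            std::cout << "loop finished\n";
        }

        std::atomic<bool> done{false};
        std::thread       loop_thread;
    };

    int main()
    {
        MiniListener l;

        l.start();
        l.finalize();
        l.join_thread();
    }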
@@ -17,6 +17,7 @@
 #include <climits>

 #include "AclManager.h"
+#include "AclRule.h"
 #include "PoolObjectAuth.h"
 #include "SqlDB.h"
 #include "OneDB.h"
@@ -65,15 +66,13 @@ AclManager::AclManager(
     int             _zone_id,
     bool            _is_federation_slave,
     time_t          _timer_period)
-    :zone_id(_zone_id), db(_db), is_federation_slave(_is_federation_slave),
-     timer_period(_timer_period)
+    : zone_id(_zone_id)
+    , db(_db)
+    , is_federation_slave(_is_federation_slave)
+    , timer_period(_timer_period)
 {
     int lastOID;

-    pthread_mutex_init(&mutex, 0);
-
-    am.addListener(this);

     //Federation slaves do not need to init the pool
     if (is_federation_slave)
     {
@@ -124,29 +123,6 @@ AclManager::AclManager(
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-extern "C" void * acl_action_loop(void *arg)
-{
-    AclManager * aclm;
-
-    if ( arg == 0 )
-    {
-        return 0;
-    }
-
-    NebulaLog::log("ACL",Log::INFO,"ACL Manager started.");
-
-    aclm = static_cast<AclManager *>(arg);
-
-    aclm->am.loop(aclm->timer_period);
-
-    NebulaLog::log("ACL",Log::INFO,"ACL Manager stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
 int AclManager::start()
 {
     int rc;
@@ -157,17 +133,10 @@ int AclManager::start()

     if (is_federation_slave)
     {
-        pthread_attr_t pattr;
-
-        pthread_attr_init (&pattr);
-        pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
-
-        rc += pthread_create(&acl_thread,&pattr,acl_action_loop,(void *) this);
+        timer_thread.reset(new Timer(timer_period, [this](){timer_action();}));
     }
-    else
-    {
         NebulaLog::log("ACL",Log::INFO,"ACL Manager started.");
-    }

     return rc;
 }
@@ -177,14 +146,25 @@ int AclManager::start()

 void AclManager::finalize()
 {
+    NebulaLog::info("ACL", "Stopping ACL Manager...");

     if (is_federation_slave)
     {
-        am.finalize();
+        timer_thread->stop();
     }
-    else
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+void AclManager::join_thread()
 {
-        NebulaLog::log("ACL",Log::INFO,"ACL Manager stopped.");
+    if (is_federation_slave)
+    {
+        timer_thread->stop();
     }

+    NebulaLog::info("ACL", "ACL Manager stopped.");
 }

 /* -------------------------------------------------------------------------- */
@@ -192,18 +172,12 @@ void AclManager::finalize()

 AclManager::~AclManager()
 {
-    multimap<long long, AclRule *>::iterator it;
+    lock_guard<std::mutex> ul(acl_mutex);

-    lock();
+    for (auto& rule : acl_rules)

-    for ( it = acl_rules.begin(); it != acl_rules.end(); it++ )
     {
-        delete it->second;
+        delete rule.second;
     }

-    unlock();
-
-    pthread_mutex_destroy(&mutex);
 }

 /* -------------------------------------------------------------------------- */
@@ -464,7 +438,7 @@ bool AclManager::match_rules_wrapper(
     }

     // Match against the internal rules
-    lock();
+    lock_guard<std::mutex> ul(acl_mutex);

     auth = match_rules(
             user_req,
@@ -478,8 +452,6 @@ bool AclManager::match_rules_wrapper(
             cluster_obj_type,
             acl_rules);

-    unlock();
-
     return auth;
 }

@@ -596,7 +568,7 @@ int AclManager::add_rule(long long user, long long resource, long long rights,
         return -1;
     }

-    lock();
+    lock_guard<std::mutex> ul(acl_mutex);

     int lastOID = get_lastOID(db);

@@ -645,8 +617,6 @@ int AclManager::add_rule(long long user, long long resource, long long rights,

     set_lastOID(db, lastOID);

-    unlock();
-
     return lastOID;


@@ -673,8 +643,6 @@ error_common:

     delete rule;

-    unlock();
-
     return rc;
 }

@@ -702,7 +670,7 @@ int AclManager::del_rule(int oid, string& error_str)
         return -1;
     }

-    lock();
+    lock_guard<std::mutex> ul(acl_mutex);

     // Check the rule exists
     found = acl_rules_oids.count(oid) > 0;
@@ -713,7 +681,6 @@ int AclManager::del_rule(int oid, string& error_str)
         oss << "Rule " << oid << " does not exist";
         error_str = oss.str();

-        unlock();
         return -1;
     }

@@ -744,7 +711,6 @@ int AclManager::del_rule(int oid, string& error_str)

         NebulaLog::log("ACL",Log::ERROR,oss);

-        unlock();
         return -1;
     }

@@ -755,7 +721,6 @@ int AclManager::del_rule(int oid, string& error_str)
     {
         error_str = "SQL DB error";

-        unlock();
         return -1;
     }

@@ -766,7 +731,6 @@ int AclManager::del_rule(int oid, string& error_str)

     delete rule;

-    unlock();
     return 0;
 }

@@ -780,20 +744,17 @@ int AclManager::del_rule(
     long long zone,
     string&   error_str)
 {
-    lock();
-
     AclRule rule(-1, user, resource, rights, zone);

     int  oid = -1;
     bool found = false;

-    multimap<long long, AclRule *>::iterator it;
-    pair<multimap<long long, AclRule *>::iterator,
-         multimap<long long, AclRule *>::iterator> index;
+    {
+        lock_guard<std::mutex> ul(acl_mutex);

-    index = acl_rules.equal_range( user );
+        auto index = acl_rules.equal_range(user);

-    for ( it = index.first; (it != index.second && !found); it++)
+        for (auto it = index.first; (it != index.second && !found); it++)
         {
             found = *(it->second) == rule;

@@ -802,8 +763,7 @@ int AclManager::del_rule(
                 oid = it->second->get_oid();
             }
         }
+    }

-    unlock();
-
     if (oid != -1)
     {
@@ -893,24 +853,20 @@ void AclManager::del_resource_rules(int oid, PoolObjectSQL::ObjectType obj_type)

 void AclManager::del_user_matching_rules(long long user_req)
 {
-    multimap<long long, AclRule *>::iterator it;
-    pair<multimap<long long, AclRule *>::iterator,
-         multimap<long long, AclRule *>::iterator> index;
-
     vector<int>           oids;
     vector<int>::iterator oid_it;
     string                error_str;

-    lock();
+    {
+        lock_guard<std::mutex> ul(acl_mutex);

-    index = acl_rules.equal_range( user_req );
+        auto index = acl_rules.equal_range( user_req );

-    for ( it = index.first; it != index.second; it++)
+        for ( auto it = index.first; it != index.second; it++)
         {
             oids.push_back(it->second->oid);
         }
+    }

-    unlock();
-
     for ( oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ )
     {
@@ -924,25 +880,22 @@ void AclManager::del_user_matching_rules(long long user_req)
 void AclManager::del_resource_matching_rules(long long resource_req,
                                              long long resource_mask)
 {
-    multimap<long long, AclRule *>::iterator it;
-
     vector<int> oids;
-    vector<int>::iterator oid_it;
     string      error_str;

-    lock();
+    {
+        lock_guard<std::mutex> ul(acl_mutex);

-    for ( it = acl_rules.begin(); it != acl_rules.end(); it++ )
+        for ( auto it = acl_rules.begin(); it != acl_rules.end(); it++ )
         {
             if ( ( it->second->resource & resource_mask ) == resource_req )
             {
                 oids.push_back(it->second->oid);
             }
         }
+    }

-    unlock();
-
-    for ( oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ )
+    for ( auto oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ )
     {
         del_rule(*oid_it, error_str);
     }
@@ -953,25 +906,22 @@ void AclManager::del_resource_matching_rules(long long resource_req,

 void AclManager::del_zone_matching_rules(long long zone_req)
 {
-    multimap<long long, AclRule *>::iterator it;
-
     vector<int> oids;
-    vector<int>::iterator oid_it;
     string      error_str;

-    lock();
+    {
+        lock_guard<std::mutex> ul(acl_mutex);

-    for ( it = acl_rules.begin(); it != acl_rules.end(); it++ )
+        for (auto it = acl_rules.begin(); it != acl_rules.end(); it++)
         {
             if ( it->second->zone == zone_req )
             {
                 oids.push_back(it->second->oid);
             }
         }
+    }

-    unlock();
-
-    for ( oid_it = oids.begin() ; oid_it < oids.end(); oid_it++ )
+    for (auto oid_it = oids.begin() ; oid_it < oids.end(); oid_it++)
     {
         del_rule(*oid_it, error_str);
     }
@@ -994,10 +944,6 @@ void AclManager::reverse_search(int uid,
 {
     ostringstream oss;

-    multimap<long long, AclRule *>::iterator it;
-    pair<multimap<long long, AclRule *>::iterator,
-         multimap<long long, AclRule *>::iterator> index;
-
     // Build masks for request
     long long resource_oid_req = obj_type | AclRule::INDIVIDUAL_ID;
     long long resource_gid_req = obj_type | AclRule::GROUP_ID;
@@ -1041,9 +987,6 @@ void AclManager::reverse_search(int uid,
     // ---------------------------------------------------

     vector<long long> user_reqs;
-    vector<long long>::iterator reqs_it;

-    set<int>::iterator g_it;

     // rules that apply to everyone
     user_reqs.push_back(AclRule::ALL_ID);
@@ -1052,20 +995,21 @@ void AclManager::reverse_search(int uid,
     user_reqs.push_back(AclRule::INDIVIDUAL_ID | uid);

     // rules that apply to each one of the user's groups
-    for (g_it = user_groups.begin(); g_it != user_groups.end(); g_it++)
+    for (auto g_it = user_groups.begin(); g_it != user_groups.end(); g_it++)
     {
         user_reqs.push_back(AclRule::GROUP_ID | *g_it);
     }

     all = false;

-    for (reqs_it = user_reqs.begin(); reqs_it != user_reqs.end(); reqs_it++)
     {
-        lock();
+        lock_guard<std::mutex> ul(acl_mutex);

-        index = acl_rules.equal_range( *reqs_it );
+        for (auto r_it : user_reqs)
+        {
+            auto index = acl_rules.equal_range( r_it );

-        for ( it = index.first; it != index.second; it++)
+            for (auto it = index.first; it != index.second; it++)
             {
                 // Rule grants the requested rights
                 if ( ( ( it->second->rights & rights_req ) == rights_req )
@@ -1111,8 +1055,6 @@ void AclManager::reverse_search(int uid,
                 }
             }

-        unlock();
-
             if ( all == true )
             {
                 oids.clear();
@@ -1121,6 +1063,7 @@ void AclManager::reverse_search(int uid,
             }
         }
     }
+}

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
@@ -1193,8 +1136,6 @@ int AclManager::select_cb(void *nil, int num, char **values, char **names)

 int AclManager::select()
 {
-    multimap<long long, AclRule *>::iterator it;
-
     ostringstream oss;
     int           rc;

@@ -1202,9 +1143,10 @@ int AclManager::select()

     set_callback(static_cast<Callbackable::Callback>(&AclManager::select_cb));

-    lock();
+    {
+        lock_guard<std::mutex> ul(acl_mutex);

-    for ( it = acl_rules.begin(); it != acl_rules.end(); it++ )
+        for (auto it = acl_rules.begin(); it != acl_rules.end(); it++)
         {
             delete it->second;
         }
@@ -1213,8 +1155,7 @@ int AclManager::select()
         acl_rules_oids.clear();

         rc = db->exec_rd(oss, this);
+    }

-    unlock();
-
     unset_callback();

@@ -1266,22 +1207,19 @@ int AclManager::drop(int oid)

 int AclManager::dump(ostringstream& oss)
 {
-    map<int, AclRule *>::iterator it;
     string xml;

-    lock();
+    lock_guard<std::mutex> ul(acl_mutex);

     oss << "<ACL_POOL>";

-    for ( it = acl_rules_oids.begin() ; it != acl_rules_oids.end(); it++ )
+    for (auto& rule : acl_rules)
     {
-        oss << it->second->to_xml(xml);
+        oss << rule.second->to_xml(xml);
     }

     oss << "</ACL_POOL>";

-    unlock();
-
     return 0;
 }
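timer_thread.reset(new Timer(timer_period, [this](){timer_action();})) replaces the pthread loop that re-queued a TIMER ActionRequest on every timeout. A minimal periodic timer with the same stop() contract could look like the sketch below (assumed behaviour only; MiniTimer is not the Timer declared in Listener.h):

    #include <chrono>
    #include <condition_variable>
    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <thread>

    class MiniTimer
    {
    public:
        MiniTimer(std::chrono::seconds period, std::function<void()> cb)
            : period(period), callback(std::move(cb)), worker([this] { run(); })
        {
        }

        ~MiniTimer() { stop(); }

        void stop()
        {
            {
                std::lock_guard<std::mutex> lock(m);
                stopped = true;
            }
            cv.notify_one();

            if (worker.joinable())
            {
                worker.join();
            }
        }

    private:
        void run()
        {
            std::unique_lock<std::mutex> lock(m);

            // Fire the callback every 'period' until stop() is called.
            while (!cv.wait_for(lock, period, [this] { return stopped; }))
            {
                callback();
            }
        }

        std::chrono::seconds  period;
        std::function<void()> callback;

        std::mutex              m;
        std::condition_variable cv;
        bool                    stopped = false;

        std::thread worker;
    };

    int main()
    {
        MiniTimer t(std::chrono::seconds(1), [] { std::cout << "tick\n"; });

        std::this_thread::sleep_for(std::chrono::seconds(3));

        t.stop();   // same role as timer_thread->stop() in the diff above
    }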
@@ -120,32 +120,8 @@ void AuthRequest::add_auth(Operation op,
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-extern "C" void * authm_action_loop(void *arg)
-{
-    AuthManager * authm;
-
-    if ( arg == nullptr )
-    {
-        return 0;
-    }
-
-    authm = static_cast<AuthManager *>(arg);
-
-    NebulaLog::log("AuM",Log::INFO,"Authorization Manager started.");
-
-    authm->am.loop(authm->timer_period);
-
-    NebulaLog::log("AuM",Log::INFO,"Authorization Manager stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-
 int AuthManager::start()
 {
-    pthread_attr_t pattr;
-
     using namespace std::placeholders; // for _1

     register_action(AuthManagerMessages::UNDEFINED,
@@ -171,44 +147,17 @@ int AuthManager::start()

     NebulaLog::log("AuM",Log::INFO,"Starting Auth Manager...");

-    pthread_attr_init(&pattr);
-    pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
+    Listener::start();

-    int rc = pthread_create(&authm_thread,&pattr,authm_action_loop,(void *) this);
+    return 0;
-
-    return rc;
 }

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-void AuthManager::user_action(const ActionRequest& ar)
-{
-    const AMAction& auth_ar = static_cast<const AMAction& >(ar);
-    AuthRequest * request   = auth_ar.request();
-
-    if ( request == nullptr )
-    {
-        return;
-    }
-
-    switch (auth_ar.action())
-    {
-        case AMAction::AUTHENTICATE:
-            authenticate_action(request);
-            break;
-
-        case AMAction::AUTHORIZE:
-            authorize_action(request);
-            break;
-    }
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-void AuthManager::authenticate_action(AuthRequest * ar)
+void AuthManager::trigger_authenticate(AuthRequest& ar)
 {
+    trigger([&] {
     // ------------------------------------------------------------------------
     // Get the driver
     // ------------------------------------------------------------------------
@@ -217,9 +166,9 @@ void AuthManager::authenticate_action(AuthRequest * ar)

     if (authm_md == nullptr)
     {
-        ar->result  = false;
-        ar->message = "Could not find Authorization driver";
-        ar->notify();
+        ar.result  = false;
+        ar.message = "Could not find Authorization driver";
+        ar.notify();

         return;
     }
@@ -228,7 +177,7 @@ void AuthManager::authenticate_action(AuthRequest * ar)
     // Queue the request
     // ------------------------------------------------------------------------

-    add_request(ar);
+    add_request(&ar);

     // ------------------------------------------------------------------------
     // Make the request to the driver
@@ -236,22 +185,24 @@ void AuthManager::authenticate_action(AuthRequest * ar)

     ostringstream oss;

-    oss << ar->uid << " "
-        << ar->driver << " "
-        << ar->username << " "
-        << ar->password << " "
-        << ar->session << " " << endl;
+    oss << ar.uid << " "
+        << ar.driver << " "
+        << ar.username << " "
+        << ar.password << " "
+        << ar.session << " " << endl;

-    auth_msg_t msg(AuthManagerMessages::AUTHENTICATE, "", ar->id, oss.str());
+    auth_msg_t msg(AuthManagerMessages::AUTHENTICATE, "", ar.id, oss.str());

     authm_md->write(msg);
+    });
 }

 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

-void AuthManager::authorize_action(AuthRequest * ar)
+void AuthManager::trigger_authorize(AuthRequest& ar)
 {
+    trigger([&] {
     // ------------------------------------------------------------------------
     // Get the driver
     // ------------------------------------------------------------------------
@@ -260,20 +211,20 @@ void AuthManager::authorize_action(AuthRequest * ar)

     if (authm_md == nullptr)
     {
-        ar->message = "Could not find Authorization driver";
-        ar->result  = false;
-        ar->notify();
+        ar.message = "Could not find Authorization driver";
+        ar.result  = false;
+        ar.notify();

         return;
     }

-    auto auths = ar->get_auths();
+    auto auths = ar.get_auths();

     if (auths.empty())
     {
-        ar->message = "Empty authorization string";
-        ar->result  = false;
-        ar->notify();
+        ar.message = "Empty authorization string";
+        ar.result  = false;
+        ar.notify();

         return;
     }
@@ -282,7 +233,7 @@ void AuthManager::authorize_action(AuthRequest * ar)
     // Queue the request
     // ------------------------------------------------------------------------

-    add_request(ar);
+    add_request(&ar);

     // ------------------------------------------------------------------------
     // Make the request to the driver
@@ -290,13 +241,14 @@ void AuthManager::authorize_action(AuthRequest * ar)

     ostringstream oss;

-    oss << ar->uid << " "
+    oss << ar.uid << " "
         << auths << " "
-        << ar->self_authorize << endl;
+        << ar.self_authorize << endl;

-    auth_msg_t msg(AuthManagerMessages::AUTHORIZE, "", ar->id, oss.str());
+    auth_msg_t msg(AuthManagerMessages::AUTHORIZE, "", ar.id, oss.str());

     authm_md->write(msg);
+    });
 }

 /* ************************************************************************** */
@@ -358,3 +310,13 @@ int AuthManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)

     return 0;
 }
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+void AuthManager::finalize_action()
+{
+    timer_thread.stop();
+
+    DriverManager::stop(drivers_timeout);
+}
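trigger_authenticate() and trigger_authorize() now wrap their bodies in trigger([&]{ ... }), so the driver request runs on the manager's single listener thread instead of going through AMAction and user_action() dispatch. A sketch of such a trigger() as a plain job queue, assuming only "push a callable, one thread executes it in order" (MiniWorker is illustrative, not the Listener in this commit):

    #include <condition_variable>
    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <thread>

    class MiniWorker
    {
    public:
        MiniWorker() : runner([this] { loop(); }) {}

        ~MiniWorker()
        {
            trigger([this] { done = true; });   // stop request, executed in order
            runner.join();
        }

        // Queue a job for the worker thread; plays the role of the old
        // ActionManager::trigger(ActionRequest) call.
        void trigger(std::function<void()> job)
        {
            {
                std::lock_guard<std::mutex> lock(m);
                jobs.push(std::move(job));
            }
            cv.notify_one();
        }

    private:
        void loop()
        {
            while (!done)
            {
                std::function<void()> job;

                {
                    std::unique_lock<std::mutex> lock(m);
                    cv.wait(lock, [this] { return !jobs.empty(); });

                    job = std::move(jobs.front());
                    jobs.pop();
                }

                job();   // run outside the lock, like the old listener->_do_action()
            }
        }

        std::mutex                        m;
        std::condition_variable           cv;
        std::queue<std::function<void()>> jobs;
        bool                              done = false;

        std::thread runner;
    };

    int main()
    {
        MiniWorker w;

        w.trigger([] { std::cout << "authenticate request handled\n"; });
        w.trigger([] { std::cout << "authorize request handled\n"; });
    }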
@@ -1,134 +0,0 @@
-/* -------------------------------------------------------------------------- */
-/* Copyright 2002-2020, OpenNebula Project, OpenNebula Systems                 */
-/*                                                                             */
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may    */
-/* not use this file except in compliance with the License. You may obtain    */
-/* a copy of the License at                                                   */
-/*                                                                             */
-/* http://www.apache.org/licenses/LICENSE-2.0                                 */
-/*                                                                             */
-/* Unless required by applicable law or agreed to in writing, software        */
-/* distributed under the License is distributed on an "AS IS" BASIS,          */
-/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   */
-/* See the License for the specific language governing permissions and        */
-/* limitations under the License.                                             */
-/* -------------------------------------------------------------------------- */
-
-#include "ActionManager.h"
-#include <ctime>
-#include <errno.h>
-
-/* ************************************************************************** */
-/* ActionManager constructor & destructor                                     */
-/* ************************************************************************** */
-
-ActionManager::ActionManager(): listener(0)
-{
-    pthread_mutex_init(&mutex,0);
-
-    pthread_cond_init(&cond,0);
-}
-
-/* -------------------------------------------------------------------------- */
-
-ActionManager::~ActionManager()
-{
-    while (!actions.empty())
-    {
-        delete actions.front();
-        actions.pop();
-    }
-
-    pthread_mutex_destroy(&mutex);
-
-    pthread_cond_destroy(&cond);
-}
-
-/* ************************************************************************** */
-/* NeActionManager public interface                                           */
-/* ************************************************************************** */
-
-void ActionManager::trigger(const ActionRequest& ar )
-{
-    lock();
-
-    actions.push(ar.clone());
-
-    pthread_cond_signal(&cond);
-
-    unlock();
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-static void set_timeout(struct timespec& timeout, struct timespec& _tout)
-{
-    clock_gettime(CLOCK_REALTIME, &timeout);
-
-    timeout.tv_sec  += _tout.tv_sec;
-    timeout.tv_nsec += _tout.tv_nsec;
-
-    while ( timeout.tv_nsec >= 1000000000 )
-    {
-        timeout.tv_sec  += 1;
-        timeout.tv_nsec -= 1000000000;
-    }
-}
-
-void ActionManager::loop(struct timespec& _tout, const ActionRequest& trequest)
-{
-    struct timespec timeout;
-
-    int finalize = 0;
-    int rc;
-
-    ActionRequest * action;
-
-    set_timeout(timeout, _tout);
-
-    //Action Loop, end when a finalize action is triggered to this manager
-    while (finalize == 0)
-    {
-        lock();
-
-        while ( actions.empty() == true )
-        {
-            if ( _tout.tv_sec != 0 || _tout.tv_nsec != 0 )
-            {
-                rc = pthread_cond_timedwait(&cond, &mutex, &timeout);
-
-                if ( rc == ETIMEDOUT )
-                    actions.push(trequest.clone());
-            }
-            else
-                pthread_cond_wait(&cond,&mutex);
-        }
-
-        action = actions.front();
-        actions.pop();
-
-        unlock();
-
-        listener->_do_action(*action);
-
-        switch(action->type())
-        {
-            case ActionRequest::TIMER:
-                set_timeout(timeout, _tout);
-                break;
-
-            case ActionRequest::FINALIZE:
-                finalize = 1;
-                break;
-
-            default:
-                break;
-        }
-
-        delete action;
-    }
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
@@ -22,7 +22,6 @@ lib_name='nebula_common'

 # Sources to generate the library
 source_files=[
-    'ActionManager.cc',
     'Attribute.cc',
     'ExtendedAttribute.cc',
     'NebulaService.cc',
@@ -17,83 +17,19 @@
 #include "DispatchManager.h"
 #include "Nebula.h"
 #include "NebulaLog.h"
+#include "VirtualMachine.h"

 using namespace std;

-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-extern "C" void * dm_action_loop(void *arg)
-{
-    DispatchManager * dm;
-
-    if ( arg == 0 )
-    {
-        return 0;
-    }
-
-    dm = static_cast<DispatchManager *>(arg);
-
-    NebulaLog::log("DiM",Log::INFO,"Dispatch Manager started.");
-
-    dm->am.loop();
-
-    NebulaLog::log("DiM",Log::INFO,"Dispatch Manager stopped.");
-
-    return 0;
-}
-
 /* -------------------------------------------------------------------------- */

 int DispatchManager::start()
 {
-    int               rc;
-    pthread_attr_t    pattr;
-
-    pthread_attr_init (&pattr);
-    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
-
     NebulaLog::log("DiM",Log::INFO,"Starting Dispatch Manager...");

-    rc = pthread_create(&dm_thread, &pattr, dm_action_loop,(void *) this);
+    Listener::start();

-    return rc;
+    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
-void DispatchManager::user_action(const ActionRequest& ar)
-{
-    const DMAction& dm_ar = static_cast<const DMAction& >(ar);
-    int vid = dm_ar.vm_id();
-
-    switch (dm_ar.action())
-    {
-        case DMAction::SUSPEND_SUCCESS:
-            suspend_success_action(vid);
-            break;
-
-        case DMAction::STOP_SUCCESS:
-            stop_success_action(vid);
-            break;
-
-        case DMAction::UNDEPLOY_SUCCESS:
-            undeploy_success_action(vid);
-            break;
-
-        case DMAction::POWEROFF_SUCCESS:
-            poweroff_success_action(vid);
-            break;
-
-        case DMAction::DONE:
-            done_action(vid);
-            break;
-
-        case DMAction::RESUBMIT:
-            resubmit_action(vid);
-            break;
-    }
 }

 /* -------------------------------------------------------------------------- */
@@ -26,6 +26,7 @@
 #include "Nebula.h"
 #include "ClusterPool.h"
 #include "HostPool.h"
+#include "VirtualMachinePool.h"
 #include "VirtualRouterPool.h"

 using namespace std;
@@ -75,7 +76,7 @@ int DispatchManager::deploy(VirtualMachine * vm, const RequestAttributes& ra)
             get_quota_template(vm, quota_tmpl, true);
         }

-        lcm->trigger(LCMAction::DEPLOY, vid, ra);
+        lcm->trigger_deploy(vid);
     }
     else
     {
@@ -208,17 +209,17 @@ int DispatchManager::migrate(VirtualMachine * vm, int poff_migrate,
     {
         switch (poff_migrate) {
            case 0:
-                lcm->trigger(LCMAction::MIGRATE, vid, ra);
+                lcm->trigger_migrate(vid, ra);
                 break;
            case 1:
-                lcm->trigger(LCMAction::POFF_MIGRATE, vid, ra);
+                lcm->trigger_migrate_poweroff(vid, ra);
                 break;
            case 2:
-                lcm->trigger(LCMAction::POFF_HARD_MIGRATE, vid, ra);
+                lcm->trigger_migrate_poweroff_hard(vid, ra);
                 break;

            default: /* Defaults to <5.8 behavior */
-                lcm->trigger(LCMAction::MIGRATE, vid, ra);
+                lcm->trigger_migrate(vid, ra);
                 break;
         }
     }
@@ -260,7 +261,7 @@ int DispatchManager::live_migrate(VirtualMachine * vm,
     if (vm->get_state()     == VirtualMachine::ACTIVE &&
        vm->get_lcm_state() == VirtualMachine::RUNNING )
     {
-        lcm->trigger(LCMAction::LIVE_MIGRATE, vid, ra);
+        lcm->trigger_live_migrate(vid, ra);
     }
     else
     {
@@ -406,7 +407,7 @@ int DispatchManager::terminate(int vid, bool hard, const RequestAttributes& ra,
         case VirtualMachine::POWEROFF:
         case VirtualMachine::STOPPED:
         case VirtualMachine::UNDEPLOYED:
-            lcm->trigger(LCMAction::SHUTDOWN, vid, ra);
+            lcm->trigger_shutdown(vid, false, ra);
             vm->unlock();
             break;

@@ -427,14 +428,7 @@ int DispatchManager::terminate(int vid, bool hard, const RequestAttributes& ra,
            {
                case VirtualMachine::RUNNING:
                case VirtualMachine::UNKNOWN:
-                    if (hard)
-                    {
-                        lcm->trigger(LCMAction::CANCEL, vid, ra);
-                    }
-                    else
-                    {
-                        lcm->trigger(LCMAction::SHUTDOWN, vid, ra);
-                    }
+                    lcm->trigger_shutdown(vid, hard, ra);
                    break;

                case VirtualMachine::BOOT_FAILURE:
@@ -451,7 +445,7 @@ int DispatchManager::terminate(int vid, bool hard, const RequestAttributes& ra,
                case VirtualMachine::PROLOG_RESUME_FAILURE:
                case VirtualMachine::PROLOG_UNDEPLOY_FAILURE:
|
||||||
case VirtualMachine::PROLOG_MIGRATE_UNKNOWN_FAILURE:
|
case VirtualMachine::PROLOG_MIGRATE_UNKNOWN_FAILURE:
|
||||||
lcm->trigger(LCMAction::DELETE, vid, ra);
|
lcm->trigger_delete(vid, ra);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
@ -497,11 +491,11 @@ int DispatchManager::undeploy(int vid, bool hard, const RequestAttributes& ra,
|
|||||||
{
|
{
|
||||||
if (hard)
|
if (hard)
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::UNDEPLOY_HARD, vid, ra);
|
lcm->trigger_undeploy_hard(vid, ra);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::UNDEPLOY, vid, ra);
|
lcm->trigger_undeploy(vid, ra);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -551,11 +545,11 @@ int DispatchManager::poweroff(int vid, bool hard, const RequestAttributes& ra,
|
|||||||
{
|
{
|
||||||
if (hard)
|
if (hard)
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::POWEROFF_HARD, vid, ra);
|
lcm->trigger_poweroff_hard(vid, ra);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::POWEROFF, vid, ra);
|
lcm->trigger_poweroff(vid, ra);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -720,7 +714,7 @@ int DispatchManager::stop(int vid, const RequestAttributes& ra,
|
|||||||
(vm->get_state() == VirtualMachine::ACTIVE &&
|
(vm->get_state() == VirtualMachine::ACTIVE &&
|
||||||
vm->get_lcm_state() == VirtualMachine::RUNNING ))
|
vm->get_lcm_state() == VirtualMachine::RUNNING ))
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::STOP, vid, ra);
|
lcm->trigger_stop(vid, ra);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -766,7 +760,7 @@ int DispatchManager::suspend(int vid, const RequestAttributes& ra,
|
|||||||
if (vm->get_state() == VirtualMachine::ACTIVE &&
|
if (vm->get_state() == VirtualMachine::ACTIVE &&
|
||||||
vm->get_lcm_state() == VirtualMachine::RUNNING )
|
vm->get_lcm_state() == VirtualMachine::RUNNING )
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::SUSPEND, vid, ra);
|
lcm->trigger_suspend(vid, ra);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -828,13 +822,13 @@ int DispatchManager::resume(int vid, const RequestAttributes& ra,
|
|||||||
}
|
}
|
||||||
else if (vm->get_state() == VirtualMachine::SUSPENDED)
|
else if (vm->get_state() == VirtualMachine::SUSPENDED)
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::RESTORE, vid, ra);
|
lcm->trigger_restore(vid, ra);
|
||||||
}
|
}
|
||||||
else if ( vm->get_state() == VirtualMachine::POWEROFF ||
|
else if ( vm->get_state() == VirtualMachine::POWEROFF ||
|
||||||
(vm->get_state() == VirtualMachine::ACTIVE &&
|
(vm->get_state() == VirtualMachine::ACTIVE &&
|
||||||
vm->get_lcm_state() == VirtualMachine::UNKNOWN))
|
vm->get_lcm_state() == VirtualMachine::UNKNOWN))
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::RESTART, vid, ra);
|
lcm->trigger_restart(vid, ra);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -893,11 +887,11 @@ int DispatchManager::reboot(int vid, bool hard, const RequestAttributes& ra,
|
|||||||
{
|
{
|
||||||
if (hard)
|
if (hard)
|
||||||
{
|
{
|
||||||
vmm->trigger(VMMAction::RESET, vid);
|
vmm->trigger_reset(vid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
vmm->trigger(VMMAction::REBOOT, vid);
|
vmm->trigger_reboot(vid);
|
||||||
}
|
}
|
||||||
|
|
||||||
vm->set_resched(false); //Rebooting cancels re-scheduling actions
|
vm->set_resched(false); //Rebooting cancels re-scheduling actions
|
||||||
@ -1016,11 +1010,11 @@ int DispatchManager::recover(VirtualMachine * vm, bool success,
|
|||||||
case VirtualMachine::CLONING_FAILURE:
|
case VirtualMachine::CLONING_FAILURE:
|
||||||
if (success)
|
if (success)
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::DISK_LOCK_SUCCESS, vid, ra);
|
lcm->trigger_disk_lock_success(vid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::DISK_LOCK_FAILURE, vid, ra);
|
lcm->trigger_disk_lock_failure(vid);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
|
||||||
@ -1126,11 +1120,11 @@ int DispatchManager::delete_vm(VirtualMachine * vm, const RequestAttributes& ra,
|
|||||||
|
|
||||||
if (is_public_host)
|
if (is_public_host)
|
||||||
{
|
{
|
||||||
vmm->trigger(VMMAction::CLEANUP, vid);
|
vmm->trigger_cleanup(vid, false);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
tm->trigger(TMAction::EPILOG_DELETE, vid);
|
tm->trigger_epilog_delete(vm);
|
||||||
}
|
}
|
||||||
|
|
||||||
free_vm_resources(vm, true);
|
free_vm_resources(vm, true);
|
||||||
@ -1140,11 +1134,11 @@ int DispatchManager::delete_vm(VirtualMachine * vm, const RequestAttributes& ra,
|
|||||||
case VirtualMachine::UNDEPLOYED:
|
case VirtualMachine::UNDEPLOYED:
|
||||||
if (is_public_host)
|
if (is_public_host)
|
||||||
{
|
{
|
||||||
vmm->trigger(VMMAction::CLEANUP, vid);
|
vmm->trigger_cleanup(vid, false);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
tm->trigger(TMAction::EPILOG_DELETE, vid);
|
tm->trigger_epilog_delete(vm);
|
||||||
}
|
}
|
||||||
|
|
||||||
free_vm_resources(vm, true);
|
free_vm_resources(vm, true);
|
||||||
@ -1159,7 +1153,7 @@ int DispatchManager::delete_vm(VirtualMachine * vm, const RequestAttributes& ra,
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case VirtualMachine::ACTIVE:
|
case VirtualMachine::ACTIVE:
|
||||||
lcm->trigger(LCMAction::DELETE, vid, ra);
|
lcm->trigger_delete(vid, ra);
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
break;
|
break;
|
||||||
|
|
||||||
@ -1174,6 +1168,23 @@ int DispatchManager::delete_vm(VirtualMachine * vm, const RequestAttributes& ra,
|
|||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
int DispatchManager::delete_vm(int vid, const RequestAttributes& ra,
|
||||||
|
std::string& error_str)
|
||||||
|
{
|
||||||
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
|
if ( vm == nullptr )
|
||||||
|
{
|
||||||
|
error_str = "Virtual machine does not exist";
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return delete_vm(vm, ra, error_str);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* -------------------------------------------------------------------------- */
|
||||||
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
int DispatchManager::delete_recreate(VirtualMachine * vm,
|
int DispatchManager::delete_recreate(VirtualMachine * vm,
|
||||||
const RequestAttributes& ra, string& error)
|
const RequestAttributes& ra, string& error)
|
||||||
{
|
{
|
||||||
@ -1242,7 +1253,7 @@ int DispatchManager::delete_recreate(VirtualMachine * vm,
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case VirtualMachine::ACTIVE: //Cleanup VM resources before PENDING
|
case VirtualMachine::ACTIVE: //Cleanup VM resources before PENDING
|
||||||
lcm->trigger(LCMAction::DELETE_RECREATE, vm->get_oid(), ra);
|
lcm->trigger_delete_recreate(vm->get_oid(), ra);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case VirtualMachine::DONE:
|
case VirtualMachine::DONE:
|
||||||
@ -1411,11 +1422,11 @@ int DispatchManager::attach(int vid, VirtualMachineTemplate * tmpl,
|
|||||||
|
|
||||||
if ( vm->get_lcm_state() == VirtualMachine::HOTPLUG )
|
if ( vm->get_lcm_state() == VirtualMachine::HOTPLUG )
|
||||||
{
|
{
|
||||||
vmm->trigger(VMMAction::ATTACH, vid);
|
vmm->trigger_attach(vid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
tm->trigger(TMAction::PROLOG_ATTACH, vid);
|
tm->trigger_prolog_attach(vm);
|
||||||
}
|
}
|
||||||
|
|
||||||
vmpool->update(vm);
|
vmpool->update(vm);
|
||||||
@ -1479,14 +1490,14 @@ int DispatchManager::detach(int vid, int disk_id, const RequestAttributes& ra,
|
|||||||
{
|
{
|
||||||
vm->set_state(VirtualMachine::HOTPLUG);
|
vm->set_state(VirtualMachine::HOTPLUG);
|
||||||
|
|
||||||
vmm->trigger(VMMAction::DETACH, vid);
|
vmm->trigger_detach(vid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
vm->set_state(VirtualMachine::ACTIVE);
|
vm->set_state(VirtualMachine::ACTIVE);
|
||||||
vm->set_state(VirtualMachine::HOTPLUG_EPILOG_POWEROFF);
|
vm->set_state(VirtualMachine::HOTPLUG_EPILOG_POWEROFF);
|
||||||
|
|
||||||
tm->trigger(TMAction::EPILOG_DETACH, vid);
|
tm->trigger_epilog_detach(vm);
|
||||||
}
|
}
|
||||||
|
|
||||||
vmpool->update(vm);
|
vmpool->update(vm);
|
||||||
@ -1540,7 +1551,7 @@ int DispatchManager::snapshot_create(int vid, string& name, int& snap_id,
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
vmm->trigger(VMMAction::SNAPSHOT_CREATE, vid);
|
vmm->trigger_snapshot_create(vid);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1603,7 +1614,7 @@ int DispatchManager::snapshot_revert(int vid, int snap_id,
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
vmm->trigger(VMMAction::SNAPSHOT_REVERT, vid);
|
vmm->trigger_snapshot_revert(vid);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1675,7 +1686,7 @@ int DispatchManager::snapshot_delete(int vid, int snap_id,
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
vmm->trigger(VMMAction::SNAPSHOT_DELETE, vid);
|
vmm->trigger_snapshot_delete(vid);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1781,7 +1792,7 @@ int DispatchManager::attach_nic(int vid, VirtualMachineTemplate* tmpl,
|
|||||||
|
|
||||||
if (vm->get_state() == VirtualMachine::ACTIVE)
|
if (vm->get_state() == VirtualMachine::ACTIVE)
|
||||||
{
|
{
|
||||||
vmm->trigger(VMMAction::ATTACH_NIC, vid);
|
vmm->trigger_attach_nic(vid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -1890,7 +1901,7 @@ int DispatchManager::detach_nic(int vid, int nic_id, const RequestAttributes& ra
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
vmm->trigger(VMMAction::DETACH_NIC, vid);
|
vmm->trigger_detach_nic(vid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -1984,12 +1995,12 @@ int DispatchManager::disk_snapshot_create(int vid, int did, const string& name,
|
|||||||
{
|
{
|
||||||
case VirtualMachine::POWEROFF:
|
case VirtualMachine::POWEROFF:
|
||||||
case VirtualMachine::SUSPENDED:
|
case VirtualMachine::SUSPENDED:
|
||||||
tm->trigger(TMAction::SNAPSHOT_CREATE, vid);
|
tm->trigger_snapshot_create(vid);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case VirtualMachine::ACTIVE:
|
case VirtualMachine::ACTIVE:
|
||||||
|
|
||||||
vmm->trigger(VMMAction::DISK_SNAPSHOT_CREATE, vid);
|
vmm->trigger_disk_snapshot_create(vid);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default: break;
|
default: break;
|
||||||
@ -2080,7 +2091,7 @@ int DispatchManager::disk_snapshot_revert(int vid, int did, int snap_id,
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
tm->trigger(TMAction::SNAPSHOT_REVERT, vid);
|
tm->trigger_snapshot_revert(vid);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -2171,7 +2182,7 @@ int DispatchManager::disk_snapshot_delete(int vid, int did, int snap_id,
|
|||||||
|
|
||||||
close_cp_history(vmpool, vm, VMActions::DISK_SNAPSHOT_DELETE_ACTION, ra);
|
close_cp_history(vmpool, vm, VMActions::DISK_SNAPSHOT_DELETE_ACTION, ra);
|
||||||
|
|
||||||
tm->trigger(TMAction::SNAPSHOT_DELETE, vid);
|
tm->trigger_snapshot_delete(vid);
|
||||||
|
|
||||||
vmpool->update(vm);
|
vmpool->update(vm);
|
||||||
|
|
||||||
@ -2254,11 +2265,11 @@ int DispatchManager::disk_resize(int vid, int did, long long new_size,
|
|||||||
{
|
{
|
||||||
case VirtualMachine::POWEROFF:
|
case VirtualMachine::POWEROFF:
|
||||||
case VirtualMachine::UNDEPLOYED:
|
case VirtualMachine::UNDEPLOYED:
|
||||||
tm->trigger(TMAction::RESIZE, vid);
|
tm->trigger_resize(vid);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case VirtualMachine::ACTIVE:
|
case VirtualMachine::ACTIVE:
|
||||||
vmm->trigger(VMMAction::DISK_RESIZE, vid);
|
vmm->trigger_disk_resize(vid);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default: break;
|
default: break;
|
||||||
@ -2303,7 +2314,7 @@ int DispatchManager::live_updateconf(int vid, const RequestAttributes& ra, strin
|
|||||||
vm->set_resched(false);
|
vm->set_resched(false);
|
||||||
|
|
||||||
// Trigger UPDATE CONF action
|
// Trigger UPDATE CONF action
|
||||||
vmm->trigger(VMMAction::UPDATE_CONF, vid);
|
vmm->trigger_update_conf(vid);
|
||||||
|
|
||||||
vmpool->update(vm);
|
vmpool->update(vm);
|
||||||
vmpool->update_search(vm);
|
vmpool->update_search(vm);
|
||||||
|
@ -18,21 +18,22 @@
|
|||||||
#include "NebulaLog.h"
|
#include "NebulaLog.h"
|
||||||
#include "Quotas.h"
|
#include "Quotas.h"
|
||||||
#include "Nebula.h"
|
#include "Nebula.h"
|
||||||
|
#include "VirtualMachinePool.h"
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
|
||||||
void DispatchManager::suspend_success_action(int vid)
|
void DispatchManager::trigger_suspend_success(int vid)
|
||||||
{
|
{
|
||||||
VirtualMachine * vm;
|
trigger([this, vid] {
|
||||||
VirtualMachineTemplate quota_tmpl;
|
VirtualMachineTemplate quota_tmpl;
|
||||||
string error_str;
|
string error_str;
|
||||||
|
|
||||||
int uid, gid;
|
int uid, gid;
|
||||||
|
|
||||||
vm = vmpool->get(vid);
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
if ( vm == 0 )
|
if (vm == nullptr)
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -71,24 +72,23 @@ void DispatchManager::suspend_success_action(int vid)
|
|||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
Quotas::vm_del(uid, gid, "a_tmpl);
|
Quotas::vm_del(uid, gid, "a_tmpl);
|
||||||
|
});
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void DispatchManager::stop_success_action(int vid)
|
void DispatchManager::trigger_stop_success(int vid)
|
||||||
{
|
{
|
||||||
VirtualMachine * vm;
|
trigger([this, vid] {
|
||||||
VirtualMachineTemplate quota_tmpl;
|
VirtualMachineTemplate quota_tmpl;
|
||||||
string error_str;
|
string error_str;
|
||||||
|
|
||||||
int uid, gid;
|
int uid, gid;
|
||||||
|
|
||||||
vm = vmpool->get(vid);
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
if ( vm == 0 )
|
if (vm == nullptr)
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -130,24 +130,23 @@ void DispatchManager::stop_success_action(int vid)
|
|||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
Quotas::vm_del(uid, gid, "a_tmpl);
|
Quotas::vm_del(uid, gid, "a_tmpl);
|
||||||
|
});
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void DispatchManager::undeploy_success_action(int vid)
|
void DispatchManager::trigger_undeploy_success(int vid)
|
||||||
{
|
{
|
||||||
VirtualMachine * vm;
|
trigger([this, vid] {
|
||||||
VirtualMachineTemplate quota_tmpl;
|
VirtualMachineTemplate quota_tmpl;
|
||||||
string error_str;
|
string error_str;
|
||||||
|
|
||||||
int uid, gid;
|
int uid, gid;
|
||||||
|
|
||||||
vm = vmpool->get(vid);
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
if ( vm == 0 )
|
if (vm == nullptr)
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -191,25 +190,23 @@ void DispatchManager::undeploy_success_action(int vid)
|
|||||||
vm->unlock();
|
vm->unlock();
|
||||||
|
|
||||||
Quotas::vm_del(uid, gid, "a_tmpl);
|
Quotas::vm_del(uid, gid, "a_tmpl);
|
||||||
|
});
|
||||||
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void DispatchManager::poweroff_success_action(int vid)
|
void DispatchManager::trigger_poweroff_success(int vid)
|
||||||
{
|
{
|
||||||
VirtualMachine * vm;
|
trigger([this, vid] {
|
||||||
VirtualMachineTemplate quota_tmpl;
|
VirtualMachineTemplate quota_tmpl;
|
||||||
string error_str;
|
string error_str;
|
||||||
|
|
||||||
int uid, gid;
|
int uid, gid;
|
||||||
|
|
||||||
vm = vmpool->get(vid);
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
if ( vm == 0 )
|
if (vm == nullptr)
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -259,24 +256,23 @@ void DispatchManager::poweroff_success_action(int vid)
|
|||||||
{
|
{
|
||||||
Quotas::vm_del(uid, gid, "a_tmpl);
|
Quotas::vm_del(uid, gid, "a_tmpl);
|
||||||
}
|
}
|
||||||
|
});
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void DispatchManager::done_action(int vid)
|
void DispatchManager::trigger_done(int vid)
|
||||||
{
|
{
|
||||||
VirtualMachine * vm;
|
trigger([this, vid] {
|
||||||
string error_str;
|
string error_str;
|
||||||
|
|
||||||
VirtualMachine::LcmState lcm_state;
|
VirtualMachine::LcmState lcm_state;
|
||||||
VirtualMachine::VmState dm_state;
|
VirtualMachine::VmState dm_state;
|
||||||
|
|
||||||
vm = vmpool->get(vid);
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
if ( vm == 0 )
|
if (vm == nullptr)
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -299,20 +295,18 @@ void DispatchManager::done_action(int vid)
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
}
|
}
|
||||||
|
});
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void DispatchManager::resubmit_action(int vid)
|
void DispatchManager::trigger_resubmit(int vid)
|
||||||
{
|
{
|
||||||
VirtualMachine * vm;
|
trigger([this, vid] {
|
||||||
|
VirtualMachine * vm = vmpool->get(vid);
|
||||||
|
|
||||||
vm = vmpool->get(vid);
|
if (vm == nullptr)
|
||||||
|
|
||||||
if ( vm == 0 )
|
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -331,6 +325,7 @@ void DispatchManager::resubmit_action(int vid)
|
|||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
}
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
#include "Group.h"
|
#include "Group.h"
|
||||||
#include "Nebula.h"
|
#include "Nebula.h"
|
||||||
#include "AclManager.h"
|
#include "AclManager.h"
|
||||||
|
#include "AclRule.h"
|
||||||
#include "OneDB.h"
|
#include "OneDB.h"
|
||||||
|
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
@ -262,7 +262,7 @@ int HookLog::retry(int hkid, int exeid, std::string& err_msg)
|
|||||||
|
|
||||||
string message = HookManager::format_message(args64, host, hkid);
|
string message = HookManager::format_message(args64, host, hkid);
|
||||||
|
|
||||||
hm->trigger(HMAction::RETRY, message);
|
hm->trigger_retry(message);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -24,33 +24,8 @@ const char * HookManager::hook_driver_name = "hook_exe";
|
|||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
extern "C" void * hm_action_loop(void *arg)
|
|
||||||
{
|
|
||||||
HookManager * hm;
|
|
||||||
|
|
||||||
if ( arg == nullptr )
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
NebulaLog::log("HKM",Log::INFO,"Hook Manager started.");
|
|
||||||
|
|
||||||
hm = static_cast<HookManager *>(arg);
|
|
||||||
|
|
||||||
hm->am.loop();
|
|
||||||
|
|
||||||
NebulaLog::log("HKM",Log::INFO,"Hook Manager stopped.");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
int HookManager::start()
|
int HookManager::start()
|
||||||
{
|
{
|
||||||
pthread_attr_t pattr;
|
|
||||||
|
|
||||||
using namespace std::placeholders; // for _1
|
using namespace std::placeholders; // for _1
|
||||||
|
|
||||||
register_action(HookManagerMessages::UNDEFINED,
|
register_action(HookManagerMessages::UNDEFINED,
|
||||||
@ -74,12 +49,9 @@ int HookManager::start()
|
|||||||
|
|
||||||
NebulaLog::log("HKM",Log::INFO,"Starting Hook Manager...");
|
NebulaLog::log("HKM",Log::INFO,"Starting Hook Manager...");
|
||||||
|
|
||||||
pthread_attr_init(&pattr);
|
Listener::start();
|
||||||
pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
|
|
||||||
|
|
||||||
int rc = pthread_create(&hm_thread,&pattr,hm_action_loop,(void *) this);
|
return 0;
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
@ -120,27 +92,9 @@ int HookManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)
|
|||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void HookManager::user_action(const ActionRequest& ar)
|
void HookManager::trigger_send_event(const std::string& message)
|
||||||
{
|
|
||||||
const HMAction& hm_ar = static_cast<const HMAction& >(ar);
|
|
||||||
const std::string& message = hm_ar.message();
|
|
||||||
|
|
||||||
switch (hm_ar.action())
|
|
||||||
{
|
|
||||||
case HMAction::SEND_EVENT:
|
|
||||||
send_event_action(message);
|
|
||||||
break;
|
|
||||||
case HMAction::RETRY:
|
|
||||||
retry_action(message);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
void HookManager::send_event_action(const std::string& message)
|
|
||||||
{
|
{
|
||||||
|
trigger([this, message] {
|
||||||
auto hmd = get();
|
auto hmd = get();
|
||||||
|
|
||||||
if ( hmd == nullptr )
|
if ( hmd == nullptr )
|
||||||
@ -150,13 +104,15 @@ void HookManager::send_event_action(const std::string& message)
|
|||||||
|
|
||||||
hook_msg_t msg(HookManagerMessages::EXECUTE, "", -1, message);
|
hook_msg_t msg(HookManagerMessages::EXECUTE, "", -1, message);
|
||||||
hmd->write(msg);
|
hmd->write(msg);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void HookManager::retry_action(const std::string& message)
|
void HookManager::trigger_retry(const std::string& message)
|
||||||
{
|
{
|
||||||
|
trigger([this, message] {
|
||||||
auto hmd = get();
|
auto hmd = get();
|
||||||
|
|
||||||
if ( hmd == nullptr )
|
if ( hmd == nullptr )
|
||||||
@ -166,6 +122,7 @@ void HookManager::retry_action(const std::string& message)
|
|||||||
|
|
||||||
hook_msg_t msg(HookManagerMessages::RETRY, "", -1, message);
|
hook_msg_t msg(HookManagerMessages::RETRY, "", -1, message);
|
||||||
hmd->write(msg);
|
hmd->write(msg);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
#include "HookStateVM.h"
|
#include "HookStateVM.h"
|
||||||
#include "VirtualMachine.h"
|
#include "VirtualMachine.h"
|
||||||
#include "NebulaUtil.h"
|
#include "NebulaUtil.h"
|
||||||
|
#include "SSLUtil.h"
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ int HostPool::allocate (
|
|||||||
{
|
{
|
||||||
std::string event = HookStateHost::format_message(host);
|
std::string event = HookStateHost::format_message(host);
|
||||||
|
|
||||||
Nebula::instance().get_hm()->trigger(HMAction::SEND_EVENT, event);
|
Nebula::instance().get_hm()->trigger_send_event(event);
|
||||||
|
|
||||||
auto *im = Nebula::instance().get_im();
|
auto *im = Nebula::instance().get_im();
|
||||||
im->update_host(host);
|
im->update_host(host);
|
||||||
@ -148,7 +148,7 @@ int HostPool::update(PoolObjectSQL * objsql)
|
|||||||
{
|
{
|
||||||
std::string event = HookStateHost::format_message(host);
|
std::string event = HookStateHost::format_message(host);
|
||||||
|
|
||||||
Nebula::instance().get_hm()->trigger(HMAction::SEND_EVENT, event);
|
Nebula::instance().get_hm()->trigger_send_event(event);
|
||||||
}
|
}
|
||||||
|
|
||||||
host->set_prev_state();
|
host->set_prev_state();
|
||||||
|
@ -31,6 +31,8 @@ int InformationManager::start()
|
|||||||
|
|
||||||
using namespace std::placeholders; // for _1
|
using namespace std::placeholders; // for _1
|
||||||
|
|
||||||
|
NebulaLog::info("InM", "Starting Information Manager...");
|
||||||
|
|
||||||
register_action(InformationManagerMessages::UNDEFINED,
|
register_action(InformationManagerMessages::UNDEFINED,
|
||||||
&InformationManager::_undefined);
|
&InformationManager::_undefined);
|
||||||
|
|
||||||
@ -51,16 +53,6 @@ int InformationManager::start()
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
NebulaLog::info("InM", "Starting Information Manager...");
|
|
||||||
|
|
||||||
im_thread = std::thread([&] {
|
|
||||||
NebulaLog::info("InM", "Information Manager started.");
|
|
||||||
|
|
||||||
am.loop();
|
|
||||||
|
|
||||||
NebulaLog::info("InM", "Information Manager stopped.");
|
|
||||||
});
|
|
||||||
|
|
||||||
auto rftm = Nebula::instance().get_raftm();
|
auto rftm = Nebula::instance().get_raftm();
|
||||||
raft_status(rftm->get_state());
|
raft_status(rftm->get_state());
|
||||||
|
|
||||||
@ -265,7 +257,7 @@ void InformationManager::_host_state(unique_ptr<im_msg_t> msg)
|
|||||||
|
|
||||||
for (const auto& vmid : host->get_vm_ids())
|
for (const auto& vmid : host->get_vm_ids())
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::MONITOR_DONE, vmid);
|
lcm->trigger_monitor_done(vmid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -334,10 +326,12 @@ void InformationManager::_host_system(unique_ptr<im_msg_t> msg)
|
|||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
static LCMAction::Actions test_and_trigger(const string& state_str,
|
static void test_and_trigger(const string& state_str, VirtualMachine * vm)
|
||||||
VirtualMachine::VmState state, VirtualMachine::LcmState lcm_state,
|
|
||||||
string& vm_message)
|
|
||||||
{
|
{
|
||||||
|
auto state = vm->get_state();
|
||||||
|
auto lcm_state = vm->get_lcm_state();
|
||||||
|
auto lcm = Nebula::instance().get_lcm();
|
||||||
|
|
||||||
if (state_str == "RUNNING")
|
if (state_str == "RUNNING")
|
||||||
{
|
{
|
||||||
if (state == VirtualMachine::POWEROFF ||
|
if (state == VirtualMachine::POWEROFF ||
|
||||||
@ -356,7 +350,8 @@ static LCMAction::Actions test_and_trigger(const string& state_str,
|
|||||||
lcm_state == VirtualMachine::BOOT_UNDEPLOY_FAILURE ||
|
lcm_state == VirtualMachine::BOOT_UNDEPLOY_FAILURE ||
|
||||||
lcm_state == VirtualMachine::BOOT_FAILURE)))
|
lcm_state == VirtualMachine::BOOT_FAILURE)))
|
||||||
{
|
{
|
||||||
return LCMAction::MONITOR_POWERON;
|
lcm->trigger_monitor_poweron(vm->get_oid());
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (state_str == "FAILURE")
|
else if (state_str == "FAILURE")
|
||||||
@ -365,9 +360,12 @@ static LCMAction::Actions test_and_trigger(const string& state_str,
|
|||||||
(lcm_state == VirtualMachine::RUNNING ||
|
(lcm_state == VirtualMachine::RUNNING ||
|
||||||
lcm_state == VirtualMachine::UNKNOWN))
|
lcm_state == VirtualMachine::UNKNOWN))
|
||||||
{
|
{
|
||||||
vm_message = "VM running but monitor state is ERROR.";
|
lcm->trigger_monitor_done(vm->get_oid());
|
||||||
|
|
||||||
return LCMAction::MONITOR_DONE;
|
vm->log("VMM", Log::INFO,
|
||||||
|
"VM running but monitor state is ERROR.");
|
||||||
|
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (state_str == "SUSPENDED")
|
else if (state_str == "SUSPENDED")
|
||||||
@ -376,9 +374,12 @@ static LCMAction::Actions test_and_trigger(const string& state_str,
|
|||||||
(lcm_state == VirtualMachine::RUNNING ||
|
(lcm_state == VirtualMachine::RUNNING ||
|
||||||
lcm_state == VirtualMachine::UNKNOWN))
|
lcm_state == VirtualMachine::UNKNOWN))
|
||||||
{
|
{
|
||||||
vm_message = "VM running but monitor state is PAUSED.";
|
lcm->trigger_monitor_suspend(vm->get_oid());
|
||||||
|
|
||||||
return LCMAction::MONITOR_SUSPEND;
|
vm->log("VMM", Log::INFO,
|
||||||
|
"VM running but monitor state is PAUSED.");
|
||||||
|
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (state_str == "POWEROFF")
|
else if (state_str == "POWEROFF")
|
||||||
@ -390,11 +391,10 @@ static LCMAction::Actions test_and_trigger(const string& state_str,
|
|||||||
lcm_state == VirtualMachine::SHUTDOWN_POWEROFF ||
|
lcm_state == VirtualMachine::SHUTDOWN_POWEROFF ||
|
||||||
lcm_state == VirtualMachine::SHUTDOWN_UNDEPLOY))
|
lcm_state == VirtualMachine::SHUTDOWN_UNDEPLOY))
|
||||||
{
|
{
|
||||||
return LCMAction::MONITOR_POWEROFF;
|
lcm->trigger_monitor_poweroff(vm->get_oid());
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return LCMAction::NONE;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -424,7 +424,6 @@ void InformationManager::_vm_state(unique_ptr<im_msg_t> msg)
|
|||||||
|
|
||||||
string deploy_id;
|
string deploy_id;
|
||||||
string state_str;
|
string state_str;
|
||||||
string vm_msg;
|
|
||||||
|
|
||||||
vector<VectorAttribute*> vms;
|
vector<VectorAttribute*> vms;
|
||||||
tmpl.get("VM", vms);
|
tmpl.get("VM", vms);
|
||||||
@ -483,18 +482,7 @@ void InformationManager::_vm_state(unique_ptr<im_msg_t> msg)
|
|||||||
/* ------------------------------------------------------------------ */
|
/* ------------------------------------------------------------------ */
|
||||||
/* Apply state changes */
|
/* Apply state changes */
|
||||||
/* ------------------------------------------------------------------ */
|
/* ------------------------------------------------------------------ */
|
||||||
LCMAction::Actions action = test_and_trigger(state_str, vm->get_state(),
|
test_and_trigger(state_str, vm);
|
||||||
vm->get_lcm_state(), vm_msg);
|
|
||||||
|
|
||||||
if ( action != LCMAction::NONE )
|
|
||||||
{
|
|
||||||
lcm->trigger(action, vm->get_oid());
|
|
||||||
|
|
||||||
if ( !vm_msg.empty() )
|
|
||||||
{
|
|
||||||
vm->log("VMM", Log::INFO, vm_msg);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
}
|
}
|
||||||
@ -562,26 +550,18 @@ void InformationManager::_vm_state(unique_ptr<im_msg_t> msg)
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
LCMAction::Actions action;
|
|
||||||
|
|
||||||
if ( missing_state == "POWEROFF" )
|
|
||||||
{
|
|
||||||
action = LCMAction::MONITOR_POWEROFF;
|
|
||||||
}
|
|
||||||
else if ( missing_state == "UNKNOWN" )
|
|
||||||
{
|
|
||||||
action = LCMAction::MONITOR_DONE;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
action = LCMAction::MONITOR_POWEROFF;
|
|
||||||
}
|
|
||||||
|
|
||||||
NebulaLog::debug("InM", "VM_STATE update from host: " +
|
NebulaLog::debug("InM", "VM_STATE update from host: " +
|
||||||
to_string(msg->oid()) + ". VM id: " + to_string(vm->get_oid()) +
|
to_string(msg->oid()) + ". VM id: " + to_string(vm->get_oid()) +
|
||||||
", state: " + missing_state);
|
", state: " + missing_state);
|
||||||
|
|
||||||
lcm->trigger(action, vm->get_oid());
|
if (missing_state == "UNKNOWN")
|
||||||
|
{
|
||||||
|
lcm->trigger_monitor_done(vm->get_oid());
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
lcm->trigger_monitor_poweroff(vm->get_oid());
|
||||||
|
}
|
||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
}
|
}
|
||||||
|
@ -848,7 +848,7 @@ void Image::set_state(ImageState _state)
|
|||||||
|
|
||||||
for (set<int>::iterator i = vms.begin(); i != vms.end(); i++)
|
for (set<int>::iterator i = vms.begin(); i != vms.end(); i++)
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::DISK_LOCK_FAILURE, *i);
|
lcm->trigger_disk_lock_failure(*i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (state == LOCKED)
|
else if (state == LOCKED)
|
||||||
@ -918,7 +918,7 @@ void Image::set_state_unlock()
|
|||||||
|
|
||||||
for (set<int>::iterator i = vms.begin(); i != vms.end(); i++)
|
for (set<int>::iterator i = vms.begin(); i != vms.end(); i++)
|
||||||
{
|
{
|
||||||
lcm->trigger(LCMAction::DISK_LOCK_SUCCESS, *i);
|
lcm->trigger_disk_lock_success(*i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -27,29 +27,6 @@ const char * ImageManager::image_driver_name = "image_exe";
|
|||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
extern "C" void * image_action_loop(void *arg)
|
|
||||||
{
|
|
||||||
ImageManager * im;
|
|
||||||
|
|
||||||
if ( arg == 0 )
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
NebulaLog::log("ImM",Log::INFO,"Image Manager started.");
|
|
||||||
|
|
||||||
im = static_cast<ImageManager *>(arg);
|
|
||||||
|
|
||||||
im->am.loop(im->timer_period);
|
|
||||||
|
|
||||||
NebulaLog::log("ImM",Log::INFO,"Image Manager stopped.");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
int ImageManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)
|
int ImageManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)
|
||||||
{
|
{
|
||||||
const VectorAttribute * vattr = 0;
|
const VectorAttribute * vattr = 0;
|
||||||
@ -87,9 +64,6 @@ int ImageManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)
|
|||||||
|
|
||||||
int ImageManager::start()
|
int ImageManager::start()
|
||||||
{
|
{
|
||||||
int rc;
|
|
||||||
pthread_attr_t pattr;
|
|
||||||
|
|
||||||
using namespace std::placeholders; // for _1
|
using namespace std::placeholders; // for _1
|
||||||
|
|
||||||
register_action(ImageManagerMessages::UNDEFINED,
|
register_action(ImageManagerMessages::UNDEFINED,
|
||||||
@ -135,18 +109,13 @@ int ImageManager::start()
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
pthread_attr_init(&pattr);
|
return 0;
|
||||||
pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
|
|
||||||
|
|
||||||
rc = pthread_create(&imagem_thread,&pattr,image_action_loop,(void *) this);
|
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void ImageManager::timer_action(const ActionRequest& ar)
|
void ImageManager::timer_action()
|
||||||
{
|
{
|
||||||
static int mark = 0;
|
static int mark = 0;
|
||||||
static int tics = monitor_period;
|
static int tics = monitor_period;
|
||||||
|
@ -314,7 +314,7 @@ void ImageManager::_mkfs(unique_ptr<image_msg_t> msg)
|
|||||||
goto error_save_state;
|
goto error_save_state;
|
||||||
}
|
}
|
||||||
|
|
||||||
tm->trigger(TMAction::SAVEAS_HOT, vm_id);
|
tm->trigger_saveas_hot(vm_id);
|
||||||
|
|
||||||
vmpool->update(vm);
|
vmpool->update(vm);
|
||||||
|
|
||||||
|
@ -24,35 +24,10 @@ using std::string;
|
|||||||
|
|
||||||
const char * IPAMManager::ipam_driver_name = "ipam_exe";
|
const char * IPAMManager::ipam_driver_name = "ipam_exe";
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
extern "C" void * ipamm_action_loop(void *arg)
|
|
||||||
{
|
|
||||||
IPAMManager * ipamm;
|
|
||||||
|
|
||||||
if ( arg == nullptr )
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
ipamm = static_cast<IPAMManager *>(arg);
|
|
||||||
|
|
||||||
NebulaLog::log("IPM",Log::INFO,"IPAM Manager started.");
|
|
||||||
|
|
||||||
ipamm->am.loop(ipamm->timer_period);
|
|
||||||
|
|
||||||
NebulaLog::log("IPM",Log::INFO,"IPAM Manager stopped.");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
int IPAMManager::start()
|
int IPAMManager::start()
|
||||||
{
|
{
|
||||||
pthread_attr_t pattr;
|
|
||||||
|
|
||||||
using namespace std::placeholders; // for _1
|
using namespace std::placeholders; // for _1
|
||||||
|
|
||||||
register_action(IPAMManagerMessages::UNDEFINED,
|
register_action(IPAMManagerMessages::UNDEFINED,
|
||||||
@ -85,110 +60,79 @@ int IPAMManager::start()
|
|||||||
|
|
||||||
NebulaLog::log("IPM",Log::INFO,"Starting IPAM Manager...");
|
NebulaLog::log("IPM",Log::INFO,"Starting IPAM Manager...");
|
||||||
|
|
||||||
pthread_attr_init(&pattr);
|
Listener::start();
|
||||||
pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
|
|
||||||
|
|
||||||
int rc = pthread_create(&ipamm_thread, &pattr, ipamm_action_loop, (void *)this);
|
return 0;
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void IPAMManager::user_action(const ActionRequest& ar)
|
void IPAMManager::send_request(IPAMManagerMessages type, IPAMRequest& ir)
|
||||||
{
|
|
||||||
const IPMAction& ipam_ar = static_cast<const IPMAction&>(ar);
|
|
||||||
|
|
||||||
IPAMRequest * request = ipam_ar.request();
|
|
||||||
|
|
||||||
if ( request == nullptr )
|
|
||||||
{
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch(ipam_ar.action())
|
|
||||||
{
|
|
||||||
case IPMAction::REGISTER_ADDRESS_RANGE:
|
|
||||||
register_address_range_action(request);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case IPMAction::UNREGISTER_ADDRESS_RANGE:
|
|
||||||
unregister_address_range_action(request);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case IPMAction::ALLOCATE_ADDRESS:
|
|
||||||
allocate_address_action(request);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case IPMAction::GET_ADDRESS:
|
|
||||||
get_address_action(request);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case IPMAction::FREE_ADDRESS:
|
|
||||||
free_address_action(request);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
void IPAMManager::send_request(IPAMManagerMessages type, IPAMRequest * ir)
|
|
||||||
{
|
{
|
||||||
auto ipammd = get();
|
auto ipammd = get();
|
||||||
|
|
||||||
if (ipammd == nullptr)
|
if (ipammd == nullptr)
|
||||||
{
|
{
|
||||||
ir->result = false;
|
ir.result = false;
|
||||||
ir->message = "Could not find the IPAM driver";
|
ir.message = "Could not find the IPAM driver";
|
||||||
|
|
||||||
ir->notify();
|
ir.notify();
|
||||||
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
add_request(ir);
|
add_request(&ir);
|
||||||
|
|
||||||
string action_data;
|
string action_data;
|
||||||
ipam_msg_t msg(type, "", ir->id, ir->to_xml64(action_data));
|
ipam_msg_t msg(type, "", ir.id, ir.to_xml64(action_data));
|
||||||
|
|
||||||
ipammd->write(msg);
|
ipammd->write(msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void IPAMManager::register_address_range_action(IPAMRequest * ir)
|
void IPAMManager::trigger_register_address_range(IPAMRequest& ir)
|
||||||
{
|
{
|
||||||
|
trigger([&] {
|
||||||
send_request(IPAMManagerMessages::REGISTER_ADDRESS_RANGE, ir);
|
send_request(IPAMManagerMessages::REGISTER_ADDRESS_RANGE, ir);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void IPAMManager::unregister_address_range_action(IPAMRequest * ir)
|
void IPAMManager::trigger_unregister_address_range(IPAMRequest& ir)
|
||||||
{
|
{
|
||||||
|
trigger([&] {
|
||||||
send_request(IPAMManagerMessages::UNREGISTER_ADDRESS_RANGE, ir);
|
send_request(IPAMManagerMessages::UNREGISTER_ADDRESS_RANGE, ir);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void IPAMManager::get_address_action(IPAMRequest * ir)
|
void IPAMManager::trigger_get_address(IPAMRequest& ir)
|
||||||
{
|
{
|
||||||
|
trigger([&] {
|
||||||
send_request(IPAMManagerMessages::GET_ADDRESS, ir);
|
send_request(IPAMManagerMessages::GET_ADDRESS, ir);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void IPAMManager::allocate_address_action(IPAMRequest * ir)
|
void IPAMManager::trigger_allocate_address(IPAMRequest& ir)
|
||||||
{
|
{
|
||||||
|
trigger([&] {
|
||||||
send_request(IPAMManagerMessages::ALLOCATE_ADDRESS, ir);
|
send_request(IPAMManagerMessages::ALLOCATE_ADDRESS, ir);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void IPAMManager::free_address_action(IPAMRequest * ir)
|
void IPAMManager::trigger_free_address(IPAMRequest& ir)
|
||||||
{
|
{
|
||||||
|
trigger([&] {
|
||||||
send_request(IPAMManagerMessages::FREE_ADDRESS, ir);
|
send_request(IPAMManagerMessages::FREE_ADDRESS, ir);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************** */
|
/* ************************************************************************** */
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -19,44 +19,15 @@
|
|||||||
#include "NebulaLog.h"
|
#include "NebulaLog.h"
|
||||||
#include "Request.h"
|
#include "Request.h"
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
extern "C" void * lcm_action_loop(void *arg)
|
|
||||||
{
|
|
||||||
LifeCycleManager * lcm;
|
|
||||||
|
|
||||||
if ( arg == 0 )
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
lcm = static_cast<LifeCycleManager *>(arg);
|
|
||||||
|
|
||||||
NebulaLog::log("LCM",Log::INFO,"Life-cycle Manager started.");
|
|
||||||
|
|
||||||
lcm->am.loop();
|
|
||||||
|
|
||||||
NebulaLog::log("LCM",Log::INFO,"Life-cycle Manager stopped.");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
int LifeCycleManager::start()
|
int LifeCycleManager::start()
|
||||||
{
|
{
|
||||||
int rc;
|
|
||||||
pthread_attr_t pattr;
|
|
||||||
|
|
||||||
pthread_attr_init(&pattr);
|
|
||||||
pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
|
|
||||||
|
|
||||||
NebulaLog::log("LCM",Log::INFO,"Starting Life-cycle Manager...");
|
NebulaLog::log("LCM",Log::INFO,"Starting Life-cycle Manager...");
|
||||||
|
|
||||||
rc = pthread_create(&lcm_thread,&pattr,lcm_action_loop,(void *) this);
|
Listener::start();
|
||||||
|
|
||||||
return rc;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
@ -80,222 +51,3 @@ void LifeCycleManager::init_managers()
|
|||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void LifeCycleManager::trigger(LCMAction::Actions action, int vid,
|
|
||||||
const RequestAttributes& ra)
|
|
||||||
{
|
|
||||||
LCMAction lcm_ar(action, vid, ra.uid, ra.gid, ra.req_id);
|
|
||||||
|
|
||||||
am.trigger(lcm_ar);
|
|
||||||
}
|
|
||||||
|
|
||||||
void LifeCycleManager::trigger(LCMAction::Actions action, int vid)
|
|
||||||
{
|
|
||||||
LCMAction lcm_ar(action, vid, -1, -1, -1);
|
|
||||||
|
|
||||||
am.trigger(lcm_ar);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
/* -------------------------------------------------------------------------- */
|
|
||||||
|
|
||||||
void LifeCycleManager::user_action(const ActionRequest& ar)
|
|
||||||
{
|
|
||||||
const LCMAction& la = static_cast<const LCMAction& >(ar);
|
|
||||||
int vid = la.vm_id();
|
|
||||||
|
|
||||||
switch (la.action())
|
|
||||||
{
|
|
||||||
// -------------------------------------------------------------------------
|
|
||||||
// Internal Actions, triggered by OpenNebula components & drivers
|
|
||||||
// -------------------------------------------------------------------------
|
|
||||||
case LCMAction::SAVE_SUCCESS:
|
|
||||||
save_success_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::SAVE_FAILURE:
|
|
||||||
save_failure_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::DEPLOY_SUCCESS:
|
|
||||||
deploy_success_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::DEPLOY_FAILURE:
|
|
||||||
deploy_failure_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::SHUTDOWN_SUCCESS:
|
|
||||||
shutdown_success_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::SHUTDOWN_FAILURE:
|
|
||||||
shutdown_failure_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::CANCEL_SUCCESS:
|
|
||||||
shutdown_success_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::CANCEL_FAILURE:
|
|
||||||
shutdown_failure_action(vid);
|
|
||||||
break;
|
|
||||||
case LCMAction::MONITOR_SUSPEND:
|
|
||||||
monitor_suspend_action(vid);
|
|
||||||
break;
|
|
||||||
    case LCMAction::MONITOR_DONE:
        monitor_done_action(vid);
        break;
    case LCMAction::MONITOR_POWEROFF:
        monitor_poweroff_action(vid);
        break;
    case LCMAction::MONITOR_POWERON:
        monitor_poweron_action(vid);
        break;
    case LCMAction::PROLOG_SUCCESS:
        prolog_success_action(vid);
        break;
    case LCMAction::PROLOG_FAILURE:
        prolog_failure_action(vid);
        break;
    case LCMAction::EPILOG_SUCCESS:
        epilog_success_action(vid);
        break;
    case LCMAction::EPILOG_FAILURE:
        epilog_failure_action(vid);
        break;
    case LCMAction::ATTACH_SUCCESS:
        attach_success_action(vid);
        break;
    case LCMAction::ATTACH_FAILURE:
        attach_failure_action(vid);
        break;
    case LCMAction::DETACH_SUCCESS:
        detach_success_action(vid);
        break;
    case LCMAction::DETACH_FAILURE:
        detach_failure_action(vid);
        break;
    case LCMAction::SAVEAS_SUCCESS:
        saveas_success_action(vid);
        break;
    case LCMAction::SAVEAS_FAILURE:
        saveas_failure_action(vid);
        break;
    case LCMAction::ATTACH_NIC_SUCCESS:
        attach_nic_success_action(vid);
        break;
    case LCMAction::ATTACH_NIC_FAILURE:
        attach_nic_failure_action(vid);
        break;
    case LCMAction::DETACH_NIC_SUCCESS:
        detach_nic_success_action(vid);
        break;
    case LCMAction::DETACH_NIC_FAILURE:
        detach_nic_failure_action(vid);
        break;
    case LCMAction::CLEANUP_SUCCESS:
        cleanup_callback_action(vid);
        break;
    case LCMAction::CLEANUP_FAILURE:
        cleanup_callback_action(vid);
        break;
    case LCMAction::SNAPSHOT_CREATE_SUCCESS:
        snapshot_create_success(vid);
        break;
    case LCMAction::SNAPSHOT_CREATE_FAILURE:
        snapshot_create_failure(vid);
        break;
    case LCMAction::SNAPSHOT_REVERT_SUCCESS:
        snapshot_revert_success(vid);
        break;
    case LCMAction::SNAPSHOT_REVERT_FAILURE:
        snapshot_revert_failure(vid);
        break;
    case LCMAction::SNAPSHOT_DELETE_SUCCESS:
        snapshot_delete_success(vid);
        break;
    case LCMAction::SNAPSHOT_DELETE_FAILURE:
        snapshot_delete_failure(vid);
        break;
    case LCMAction::DISK_SNAPSHOT_SUCCESS:
        disk_snapshot_success(vid);
        break;
    case LCMAction::DISK_SNAPSHOT_FAILURE:
        disk_snapshot_failure(vid);
        break;
    case LCMAction::DISK_LOCK_SUCCESS:
        disk_lock_success(vid);
        break;
    case LCMAction::DISK_LOCK_FAILURE:
        disk_lock_failure(vid);
        break;
    case LCMAction::DISK_RESIZE_SUCCESS:
        disk_resize_success(vid);
        break;
    case LCMAction::DISK_RESIZE_FAILURE:
        disk_resize_failure(vid);
        break;
    case LCMAction::UPDATE_CONF_SUCCESS:
        update_conf_success(vid);
        break;
    case LCMAction::UPDATE_CONF_FAILURE:
        update_conf_failure(vid);
        break;
    // -------------------------------------------------------------------------
    // External Actions, triggered by user requests
    // -------------------------------------------------------------------------
    case LCMAction::DEPLOY:
        deploy_action(la);
        break;
    case LCMAction::SUSPEND:
        suspend_action(la);
        break;
    case LCMAction::RESTORE:
        restore_action(la);
        break;
    case LCMAction::STOP:
        stop_action(la);
        break;
    case LCMAction::CANCEL:
        shutdown_action(la, true);
        break;
    case LCMAction::MIGRATE:
        migrate_action(la);
        break;
    case LCMAction::POFF_MIGRATE:
        migrate_action(la);
        break;
    case LCMAction::POFF_HARD_MIGRATE:
        migrate_action(la);
        break;
    case LCMAction::LIVE_MIGRATE:
        live_migrate_action(la);
        break;
    case LCMAction::SHUTDOWN:
        shutdown_action(la, false);
        break;
    case LCMAction::UNDEPLOY:
        undeploy_action(la, false);
        break;
    case LCMAction::UNDEPLOY_HARD:
        undeploy_action(la, true);
        break;
    case LCMAction::RESTART:
        restart_action(la);
        break;
    case LCMAction::DELETE:
        delete_action(la);
        break;
    case LCMAction::DELETE_RECREATE:
        delete_recreate_action(la);
        break;
    case LCMAction::POWEROFF:
        poweroff_action(la);
        break;
    case LCMAction::POWEROFF_HARD:
        poweroff_hard_action(la);
        break;
    case LCMAction::UPDATESG:
        updatesg_action(la);
        break;
    case LCMAction::NONE:
        break;
    }
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
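The switch above is the ActionManager-style dispatcher removed by this commit: every driver callback and user request was mapped to an LCMAction value and routed back to a per-event handler. The refactor replaces it with one trigger_* method per event that queues a lambda for the manager's worker thread. Below is a minimal, self-contained sketch of that pattern; the class and method names are illustrative only and are not the actual Listener API introduced by the commit.

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Minimal event queue: trigger() enqueues work, loop() runs it in order.
class EventQueue
{
public:
    void trigger(std::function<void()> fn)
    {
        {
            std::lock_guard<std::mutex> lock(mtx);
            events.push(std::move(fn));
        }
        cv.notify_one();
    }

    void finalize() { trigger(nullptr); } // empty function acts as stop marker

    void loop()
    {
        while (true)
        {
            std::function<void()> fn;
            {
                std::unique_lock<std::mutex> lock(mtx);
                cv.wait(lock, [this]{ return !events.empty(); });
                fn = std::move(events.front());
                events.pop();
            }
            if (!fn) return; // stop marker
            fn();            // run the queued event
        }
    }

private:
    std::mutex mtx;
    std::condition_variable cv;
    std::queue<std::function<void()>> events;
};

// Instead of a switch over an enum, each event gets its own trigger_* method.
class MiniLifeCycleManager : public EventQueue
{
public:
    void trigger_prolog_success(int vid)
    {
        trigger([this, vid]{ std::cout << "prolog success for VM " << vid << "\n"; });
    }

    void trigger_prolog_failure(int vid)
    {
        trigger([this, vid]{ std::cout << "prolog failure for VM " << vid << "\n"; });
    }
};

int main()
{
    MiniLifeCycleManager lcm;
    std::thread worker([&lcm]{ lcm.loop(); });

    lcm.trigger_prolog_success(42);
    lcm.trigger_prolog_failure(7);
    lcm.finalize();

    worker.join();
    return 0;
}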
@@ -23,6 +23,7 @@

#include "ClusterPool.h"
#include "HostPool.h"
#include "ImagePool.h"
#include "VirtualMachinePool.h"

using namespace std;

@@ -67,9 +68,12 @@ void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm)

    //----------------------------------------------------

    tm->trigger(TMAction::PROLOG_MIGR,vm->get_oid());
    tm->trigger_prolog_migr(vm);
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::revert_migrate_after_failure(VirtualMachine* vm)
{
    HostShareCapacity sr;

@@ -118,8 +122,9 @@ void LifeCycleManager::revert_migrate_after_failure(VirtualMachine* vm)

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::save_success_action(int vid)
void LifeCycleManager::trigger_save_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;
    ostringstream os;

@@ -149,7 +154,7 @@ void LifeCycleManager::save_success_action(int vid)

        //----------------------------------------------------

        dm->trigger(DMAction::SUSPEND_SUCCESS,vid);
        dm->trigger_suspend_success(vid);
    }
    else if ( vm->get_lcm_state() == VirtualMachine::SAVE_STOP)
    {

@@ -176,7 +181,7 @@ void LifeCycleManager::save_success_action(int vid)

        //----------------------------------------------------

        tm->trigger(TMAction::EPILOG_STOP,vid);
        tm->trigger_epilog_stop(vm);
    }
    else
    {

@@ -184,13 +189,15 @@ void LifeCycleManager::save_success_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::save_failure_action(int vid)
void LifeCycleManager::trigger_save_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -228,13 +235,15 @@ void LifeCycleManager::save_failure_action(int vid)
    }

    vm->unlock();
    });
}

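Each handler keeps its original body; the refactor only renames it (save_success_action becomes trigger_save_success) and wraps the body in trigger([this, vid] { ... });, so the work now runs later on the listener thread. Because execution is deferred, the lambdas capture vid by value; capturing a local by reference would dangle once the enqueuing call returns. A small hedged illustration of that rule, not code from the commit:

#include <functional>
#include <iostream>
#include <vector>

// Queue of deferred handlers, executed after the enqueuing scope has exited.
std::vector<std::function<void()>> pending;

void trigger_save_success(int vid)
{
    // Capture vid by value: the lambda may run long after this frame is gone.
    pending.push_back([vid] {
        std::cout << "save success for VM " << vid << "\n";
    });
}

int main()
{
    for (int vid = 0; vid < 3; ++vid)
    {
        trigger_save_success(vid);
    }

    // Run the deferred handlers; each still sees the vid it was queued with.
    for (auto& fn : pending)
    {
        fn();
    }
    return 0;
}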
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::deploy_success_action(int vid)
void LifeCycleManager::trigger_deploy_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -313,14 +322,15 @@ void LifeCycleManager::deploy_success_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::deploy_failure_action(int vid)
void LifeCycleManager::trigger_deploy_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -425,13 +435,15 @@ void LifeCycleManager::deploy_failure_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::shutdown_success_action(int vid)
void LifeCycleManager::trigger_shutdown_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;
    time_t the_time = time(0);

@@ -464,7 +476,7 @@ void LifeCycleManager::shutdown_success_action(int vid)

        //----------------------------------------------------

        tm->trigger(TMAction::EPILOG,vid);
        tm->trigger_epilog(false, vm);
    }
    else if (vm->get_lcm_state() == VirtualMachine::SHUTDOWN_POWEROFF)
    {

@@ -481,7 +493,7 @@ void LifeCycleManager::shutdown_success_action(int vid)

        //----------------------------------------------------

        dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
        dm->trigger_poweroff_success(vid);
    }
    else if (vm->get_lcm_state() == VirtualMachine::SHUTDOWN_UNDEPLOY)
    {

@@ -506,7 +518,7 @@ void LifeCycleManager::shutdown_success_action(int vid)

        //----------------------------------------------------

        tm->trigger(TMAction::EPILOG_STOP,vid);
        tm->trigger_epilog_stop(vm);
    }
    else if (vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE)
    {

@@ -518,13 +530,15 @@ void LifeCycleManager::shutdown_success_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::shutdown_failure_action(int vid)
void LifeCycleManager::trigger_shutdown_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -563,19 +577,19 @@ void LifeCycleManager::shutdown_failure_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::prolog_success_action(int vid)
void LifeCycleManager::trigger_prolog_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;
    time_t the_time = time(0);
    ostringstream os;

    VMMAction::Actions action;

    vm = vmpool->get(vid);

    if ( vm == nullptr )

@@ -604,13 +618,13 @@ void LifeCycleManager::prolog_success_action(int vid)
    {
        case VirtualMachine::PROLOG_RESUME:
        case VirtualMachine::PROLOG_RESUME_FAILURE:
            action = VMMAction::RESTORE;
            vmm->trigger_restore(vid);
            vm->set_state(VirtualMachine::BOOT_STOPPED);
            break;

        case VirtualMachine::PROLOG_UNDEPLOY:
        case VirtualMachine::PROLOG_UNDEPLOY_FAILURE:
            action = VMMAction::DEPLOY;
            vmm->trigger_deploy(vid);
            vm->set_state(VirtualMachine::BOOT_UNDEPLOY);
            break;

@@ -619,12 +633,12 @@ void LifeCycleManager::prolog_success_action(int vid)
            if (vm->get_action() == VMActions::POFF_MIGRATE_ACTION ||
                vm->get_action() == VMActions::POFF_HARD_MIGRATE_ACTION)
            {
                action = VMMAction::DEPLOY;
                vmm->trigger_deploy(vid);
                vm->set_state(VirtualMachine::BOOT);
            }
            else
            {
                action = VMMAction::RESTORE;
                vmm->trigger_restore(vid);
                vm->set_state(VirtualMachine::BOOT_MIGRATE);
            }
            break;

@@ -632,7 +646,7 @@ void LifeCycleManager::prolog_success_action(int vid)
        case VirtualMachine::PROLOG_MIGRATE_UNKNOWN_FAILURE:
        case VirtualMachine::PROLOG:
        case VirtualMachine::PROLOG_FAILURE: //recover success
            action = VMMAction::DEPLOY;
            vmm->trigger_deploy(vid);
            vm->set_state(VirtualMachine::BOOT);
            break;

@@ -647,8 +661,6 @@ void LifeCycleManager::prolog_success_action(int vid)
            vmpool->update_history(vm);

            vmpool->update(vm);

            vmm->trigger(action,vid);
            break;

        //---------------------------------------------------------------------

@@ -672,11 +684,11 @@ void LifeCycleManager::prolog_success_action(int vid)
            if (lcm_state == VirtualMachine::PROLOG_MIGRATE_POWEROFF||
                lcm_state == VirtualMachine::PROLOG_MIGRATE_POWEROFF_FAILURE)
            {
                dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
                dm->trigger_poweroff_success(vid);
            }
            else //PROLOG_MIGRATE_SUSPEND, PROLOG_MIGRATE_SUSPEND_FAILURE
            {
                dm->trigger(DMAction::SUSPEND_SUCCESS,vid);
                dm->trigger_suspend_success(vid);
            }
            break;

@@ -686,15 +698,15 @@ void LifeCycleManager::prolog_success_action(int vid)
    }

    vm->unlock();
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::prolog_failure_action(int vid)
void LifeCycleManager::trigger_prolog_failure(int vid)
{
    trigger([this, vid] {
    HostShareCapacity sr;

    time_t t = time(0);

@@ -789,7 +801,7 @@ void LifeCycleManager::prolog_failure_action(int vid)

            vmpool->update(vm);

            trigger(LCMAction::PROLOG_SUCCESS, vm->get_oid());
            trigger_prolog_success(vm->get_oid());
            break;

        case VirtualMachine::PROLOG_RESUME_FAILURE:

@@ -803,26 +815,24 @@ void LifeCycleManager::prolog_failure_action(int vid)
    }

    vm->unlock();
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::epilog_success_action(int vid)
void LifeCycleManager::trigger_epilog_success(int vid)
{
    VirtualMachine * vm;
    trigger([this, vid] {

    HostShareCapacity sr;

    time_t the_time = time(0);
    unsigned int port;

    VirtualMachine::LcmState state;
    DMAction::Actions action;
    void (DispatchManager::*action)(int);

    vm = vmpool->get(vid);
    VirtualMachine * vm = vmpool->get(vid);

    if ( vm == nullptr )
    {

@@ -849,19 +859,19 @@ void LifeCycleManager::epilog_success_action(int vid)

    if ( state == VirtualMachine::EPILOG_STOP )
    {
        action = DMAction::STOP_SUCCESS;
        action = &DispatchManager::trigger_stop_success;
    }
    else if ( state == VirtualMachine::EPILOG_UNDEPLOY )
    {
        action = DMAction::UNDEPLOY_SUCCESS;
        action = &DispatchManager::trigger_undeploy_success;
    }
    else if ( state == VirtualMachine::EPILOG )
    {
        action = DMAction::DONE;
        action = &DispatchManager::trigger_done;
    }
    else if ( state == VirtualMachine::CLEANUP_RESUBMIT )
    {
        dm->trigger(DMAction::RESUBMIT, vid);
        dm->trigger_resubmit(vid);

        vmpool->update(vm);

@@ -901,18 +911,18 @@ void LifeCycleManager::epilog_success_action(int vid)

    //----------------------------------------------------

    dm->trigger(action,vid);
    (dm->*action)(vid);

    vm->unlock();
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

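In trigger_epilog_success the DMAction enum plus dm->trigger(action, vid) is replaced by a pointer to a DispatchManager member function, invoked as (dm->*action)(vid). A standalone illustration of that idiom follows; the class and method names are invented for the example and are not taken from the commit.

#include <iostream>

class Dispatcher
{
public:
    void stop_success(int vid)     { std::cout << "stop success "     << vid << "\n"; }
    void undeploy_success(int vid) { std::cout << "undeploy success " << vid << "\n"; }
};

int main()
{
    Dispatcher dm;

    // Select the handler once, as a pointer to member function...
    void (Dispatcher::*action)(int) = nullptr;

    bool stopped = true;

    if (stopped)
    {
        action = &Dispatcher::stop_success;
    }
    else
    {
        action = &Dispatcher::undeploy_success;
    }

    // ...and invoke it later through the object, as the refactored code does.
    if (action != nullptr)
    {
        (dm.*action)(42);
    }

    return 0;
}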
void LifeCycleManager::cleanup_callback_action(int vid)
void LifeCycleManager::trigger_cleanup_callback(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    VirtualMachine::LcmState state;

@@ -928,7 +938,7 @@ void LifeCycleManager::cleanup_callback_action(int vid)

    if ( state == VirtualMachine::CLEANUP_RESUBMIT )
    {
        dm->trigger(DMAction::RESUBMIT, vid);
        dm->trigger_resubmit(vid);
    }
    else

@@ -937,15 +947,15 @@ void LifeCycleManager::cleanup_callback_action(int vid)
    }

    vm->unlock();
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::epilog_failure_action(int vid)
void LifeCycleManager::trigger_epilog_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    VirtualMachine::LcmState state;

@@ -961,7 +971,7 @@ void LifeCycleManager::epilog_failure_action(int vid)

    if ( state == VirtualMachine::CLEANUP_RESUBMIT )
    {
        dm->trigger(DMAction::RESUBMIT, vid);
        dm->trigger_resubmit(vid);
    }
    else if ( state == VirtualMachine::EPILOG )
    {

@@ -987,15 +997,15 @@ void LifeCycleManager::epilog_failure_action(int vid)
    }

    vm->unlock();
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::monitor_suspend_action(int vid)
void LifeCycleManager::trigger_monitor_suspend(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1031,7 +1041,7 @@ void LifeCycleManager::monitor_suspend_action(int vid)

        //----------------------------------------------------

        dm->trigger(DMAction::SUSPEND_SUCCESS,vid);
        dm->trigger_suspend_success(vid);
    }
    else
    {

@@ -1039,13 +1049,15 @@ void LifeCycleManager::monitor_suspend_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::monitor_done_action(int vid)
void LifeCycleManager::trigger_monitor_done(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1072,13 +1084,15 @@ void LifeCycleManager::monitor_done_action(int vid)
    // Just ignore the callback if VM is not in RUNNING.

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::monitor_poweroff_action(int vid)
void LifeCycleManager::trigger_monitor_poweroff(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1114,7 +1128,7 @@ void LifeCycleManager::monitor_poweroff_action(int vid)

        //----------------------------------------------------

        dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
        dm->trigger_poweroff_success(vid);

    }
    else if ( vm->get_lcm_state() == VirtualMachine::SHUTDOWN ||

@@ -1123,17 +1137,19 @@ void LifeCycleManager::monitor_poweroff_action(int vid)
    {
        vm->log("LCM", Log::INFO, "VM reported SHUTDOWN by the drivers");

        trigger(LCMAction::SHUTDOWN_SUCCESS, vid);
        trigger_shutdown_success(vid);
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::monitor_poweron_action(int vid)
void LifeCycleManager::trigger_monitor_poweron(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1191,7 +1207,7 @@ void LifeCycleManager::monitor_poweron_action(int vid)
        case VirtualMachine::BOOT_FAILURE:
            vm->log("LCM", Log::INFO, "VM reported RUNNING by the drivers");

            trigger(LCMAction::DEPLOY_SUCCESS, vid);
            trigger_deploy_success(vid);
            break;

        default:

@@ -1200,13 +1216,15 @@ void LifeCycleManager::monitor_poweron_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::attach_success_action(int vid)
void LifeCycleManager::trigger_attach_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1233,7 +1251,7 @@ void LifeCycleManager::attach_success_action(int vid)
        vmpool->update(vm);
        vmpool->update_search(vm);

        dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
        dm->trigger_poweroff_success(vid);
    }
    else
    {

@@ -1241,13 +1259,15 @@ void LifeCycleManager::attach_success_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::attach_failure_action(int vid)
void LifeCycleManager::trigger_attach_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1279,7 +1299,7 @@ void LifeCycleManager::attach_failure_action(int vid)
        {
            vm->log("LCM", Log::INFO, "VM Disk attach failure.");

            dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
            dm->trigger_poweroff_success(vid);
        }

        vmpool->update(vm);

@@ -1291,13 +1311,15 @@ void LifeCycleManager::attach_failure_action(int vid)
        vm->log("LCM",Log::ERROR,"attach_failure_action, VM in a wrong state");
        vm->unlock();
    }
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::detach_success_action(int vid)
void LifeCycleManager::trigger_detach_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1329,7 +1351,7 @@ void LifeCycleManager::detach_success_action(int vid)
        {
            vm->log("LCM", Log::INFO, "VM Disk successfully detached.");

            dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
            dm->trigger_poweroff_success(vid);
        }

        vmpool->update(vm);

@@ -1342,13 +1364,15 @@ void LifeCycleManager::detach_success_action(int vid)
        vm->log("LCM",Log::ERROR,"detach_success_action, VM in a wrong state");
        vm->unlock();
    }
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::detach_failure_action(int vid)
void LifeCycleManager::trigger_detach_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1373,7 +1397,7 @@ void LifeCycleManager::detach_failure_action(int vid)
        vm->clear_attach_disk();
        vmpool->update(vm);

        dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
        dm->trigger_poweroff_success(vid);
    }
    else
    {

@@ -1381,13 +1405,15 @@ void LifeCycleManager::detach_failure_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

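Throughout these handlers, calls of the form dm->trigger(DMAction::POWEROFF_SUCCESS, vid) become dm->trigger_poweroff_success(vid): the enum-plus-switch API on DispatchManager is traded for one named method per event. A minimal before/after sketch of that API change (types and names are illustrative, not the real DispatchManager interface):

#include <iostream>

// Before: one generic entry point keyed by an enum, dispatched with a switch.
namespace before
{
    enum class Action { POWEROFF_SUCCESS, SUSPEND_SUCCESS };

    struct DispatchManager
    {
        void trigger(Action a, int vid)
        {
            switch (a)
            {
                case Action::POWEROFF_SUCCESS: poweroff(vid); break;
                case Action::SUSPEND_SUCCESS:  suspend(vid);  break;
            }
        }
        void poweroff(int vid) { std::cout << "poweroff " << vid << "\n"; }
        void suspend(int vid)  { std::cout << "suspend "  << vid << "\n"; }
    };
}

// After: the caller names the event directly; no enum and no switch to keep in sync.
namespace after
{
    struct DispatchManager
    {
        void trigger_poweroff_success(int vid) { std::cout << "poweroff " << vid << "\n"; }
        void trigger_suspend_success(int vid)  { std::cout << "suspend "  << vid << "\n"; }
    };
}

int main()
{
    before::DispatchManager old_dm;
    old_dm.trigger(before::Action::POWEROFF_SUCCESS, 7);

    after::DispatchManager new_dm;
    new_dm.trigger_poweroff_success(7);

    return 0;
}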
void LifeCycleManager::snapshot_create_success(int vid)
void LifeCycleManager::trigger_snapshot_create_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1411,13 +1437,15 @@ void LifeCycleManager::snapshot_create_success(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::snapshot_create_failure(int vid)
void LifeCycleManager::trigger_snapshot_create_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1441,16 +1469,17 @@ void LifeCycleManager::snapshot_create_failure(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::snapshot_revert_success(int vid)
void LifeCycleManager::trigger_snapshot_revert_success(int vid)
{
    // TODO: snapshot list may be inconsistent with hypervisor info
    // after a revert operation
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1474,21 +1503,23 @@ void LifeCycleManager::snapshot_revert_success(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::snapshot_revert_failure(int vid)
void LifeCycleManager::trigger_snapshot_revert_failure(int vid)
{
    snapshot_revert_success(vid);
    trigger_snapshot_revert_success(vid);
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::snapshot_delete_success(int vid)
void LifeCycleManager::trigger_snapshot_delete_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1512,13 +1543,15 @@ void LifeCycleManager::snapshot_delete_success(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::snapshot_delete_failure(int vid)
void LifeCycleManager::trigger_snapshot_delete_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1542,13 +1575,15 @@ void LifeCycleManager::snapshot_delete_failure(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::attach_nic_success_action(int vid)
void LifeCycleManager::trigger_attach_nic_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1574,7 +1609,7 @@ void LifeCycleManager::attach_nic_success_action(int vid)
        vmpool->update(vm);
        vmpool->update_search(vm);

        dm->trigger(DMAction::POWEROFF_SUCCESS,vid);
        dm->trigger_poweroff_success(vid);
    }
    else
    {

@@ -1582,13 +1617,15 @@ void LifeCycleManager::attach_nic_success_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::attach_nic_failure_action(int vid)
void LifeCycleManager::trigger_attach_nic_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1623,20 +1660,22 @@ void LifeCycleManager::attach_nic_failure_action(int vid)

        vmpool->delete_attach_nic(vid);

        dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
        dm->trigger_poweroff_success(vid);
    }
    else
    {
        vm->log("LCM",Log::ERROR,"attach_nic_failure_action, VM in a wrong state");
        vm->unlock();
    }
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::detach_nic_success_action(int vid)
void LifeCycleManager::trigger_detach_nic_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1680,7 +1719,7 @@ void LifeCycleManager::detach_nic_success_action(int vid)
        vmpool->update(vm);
        vmpool->update_search(vm);

        dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
        dm->trigger_poweroff_success(vid);
    }
    else
    {

@@ -1688,13 +1727,15 @@ void LifeCycleManager::detach_nic_success_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::detach_nic_failure_action(int vid)
void LifeCycleManager::trigger_detach_nic_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm;

    vm = vmpool->get(vid);

@@ -1718,7 +1759,7 @@ void LifeCycleManager::detach_nic_failure_action(int vid)

        vmpool->update(vm);

        dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
        dm->trigger_poweroff_success(vid);
    }
    else
    {

@@ -1726,13 +1767,15 @@ void LifeCycleManager::detach_nic_failure_action(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::saveas_success_action(int vid)
void LifeCycleManager::trigger_saveas_success(int vid)
{
    trigger([this, vid] {
    int image_id;
    int disk_id;
    string tm_mad;

@@ -1783,13 +1826,15 @@ void LifeCycleManager::saveas_success_action(int vid)
    ipool->update(image);

    image->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::saveas_failure_action(int vid)
void LifeCycleManager::trigger_saveas_failure(int vid)
{
    trigger([this, vid] {
    int image_id;
    int disk_id;
    string tm_mad;

@@ -1840,13 +1885,15 @@ void LifeCycleManager::saveas_failure_action(int vid)
    ipool->update(image);

    image->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::disk_snapshot_success(int vid)
void LifeCycleManager::trigger_disk_snapshot_success(int vid)
{
    trigger([this, vid] {
    string tm_mad;
    int disk_id, ds_id, snap_id;
    int img_id = -1;

@@ -1975,27 +2022,27 @@ void LifeCycleManager::disk_snapshot_success(int vid)
        case VirtualMachine::DISK_SNAPSHOT_POWEROFF:
        case VirtualMachine::DISK_SNAPSHOT_REVERT_POWEROFF:
        case VirtualMachine::DISK_SNAPSHOT_DELETE_POWEROFF:
            dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
            dm->trigger_poweroff_success(vid);
            break;

        case VirtualMachine::DISK_SNAPSHOT_SUSPENDED:
        case VirtualMachine::DISK_SNAPSHOT_REVERT_SUSPENDED:
        case VirtualMachine::DISK_SNAPSHOT_DELETE_SUSPENDED:
            dm->trigger(DMAction::SUSPEND_SUCCESS, vid);
            dm->trigger_suspend_success(vid);
            break;

        default:
            return;
    }
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::disk_snapshot_failure(int vid)
void LifeCycleManager::trigger_disk_snapshot_failure(int vid)
{
    trigger([this, vid] {
    string tm_mad;
    int disk_id, ds_id, snap_id;
    int img_id = -1;

@@ -2119,26 +2166,27 @@ void LifeCycleManager::disk_snapshot_failure(int vid)
        case VirtualMachine::DISK_SNAPSHOT_POWEROFF:
        case VirtualMachine::DISK_SNAPSHOT_REVERT_POWEROFF:
        case VirtualMachine::DISK_SNAPSHOT_DELETE_POWEROFF:
            dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
            dm->trigger_poweroff_success(vid);
            break;

        case VirtualMachine::DISK_SNAPSHOT_SUSPENDED:
        case VirtualMachine::DISK_SNAPSHOT_REVERT_SUSPENDED:
        case VirtualMachine::DISK_SNAPSHOT_DELETE_SUSPENDED:
            dm->trigger(DMAction::SUSPEND_SUCCESS, vid);
            dm->trigger_suspend_success(vid);
            break;

        default:
            return;
    }
    return;
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::disk_lock_success(int vid)
void LifeCycleManager::trigger_disk_lock_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm = vmpool->get_ro(vid);
    Image * image;

@@ -2237,21 +2285,23 @@ void LifeCycleManager::disk_lock_success(int vid)
    vmpool->update(vm);

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::disk_lock_failure(int vid)
void LifeCycleManager::trigger_disk_lock_failure(int vid)
{
    disk_lock_success(vid);
    trigger_disk_lock_success(vid);
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::disk_resize_success(int vid)
void LifeCycleManager::trigger_disk_resize_success(int vid)
{
    trigger([this, vid] {
    int img_id = -1;
    long long size;

@@ -2310,25 +2360,25 @@ void LifeCycleManager::disk_resize_success(int vid)
    switch (state)
    {
        case VirtualMachine::DISK_RESIZE_POWEROFF:
            dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
            dm->trigger_poweroff_success(vid);
            break;

        case VirtualMachine::DISK_RESIZE_UNDEPLOYED:
            dm->trigger(DMAction::UNDEPLOY_SUCCESS, vid);
            dm->trigger_undeploy_success(vid);
            break;

        default:
            return;
    }
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::disk_resize_failure(int vid)
void LifeCycleManager::trigger_disk_resize_failure(int vid)
{
    trigger([this, vid] {
    Template ds_deltas;
    Template vm_deltas;

@@ -2414,25 +2464,25 @@ void LifeCycleManager::disk_resize_failure(int vid)
    switch (state)
    {
        case VirtualMachine::DISK_RESIZE_POWEROFF:
            dm->trigger(DMAction::POWEROFF_SUCCESS, vid);
            dm->trigger_poweroff_success(vid);
            break;

        case VirtualMachine::DISK_RESIZE_UNDEPLOYED:
            dm->trigger(DMAction::UNDEPLOY_SUCCESS, vid);
            dm->trigger_undeploy_success(vid);
            break;

        default:
            return;
    }
    });
    return;
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::update_conf_success(int vid)
void LifeCycleManager::trigger_update_conf_success(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm = vmpool->get(vid);

    if ( vm == nullptr )

@@ -2452,13 +2502,15 @@ void LifeCycleManager::update_conf_success(int vid)
    }

    vm->unlock();
    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

void LifeCycleManager::update_conf_failure(int vid)
void LifeCycleManager::trigger_update_conf_failure(int vid)
{
    trigger([this, vid] {
    VirtualMachine * vm = vmpool->get(vid);

    if ( vm == nullptr )

@@ -2479,6 +2531,7 @@ void LifeCycleManager::update_conf_failure(int vid)
        vm->log("LCM",Log::ERROR,"update_conf_failure, VM in a wrong state");
        vm->unlock();
    }
    });
}

/* -------------------------------------------------------------------------- */

@ -28,37 +28,14 @@ const char * MarketPlaceManager::market_driver_name = "market_exe";
|
|||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
-/* -------------------------------------------------------------------------- */
-
-extern "C" void * marketplace_action_loop(void *arg)
-{
-    MarketPlaceManager * mpm;
-
-    if ( arg == nullptr )
-    {
-        return 0;
-    }
-
-    NebulaLog::log("MKP", Log::INFO, "Marketplace Manager started.");
-
-    mpm = static_cast<MarketPlaceManager *>(arg);
-
-    mpm->am.loop(mpm->timer_period);
-
-    NebulaLog::log("MKP", Log::INFO, "Marketplace Manager stopped.");
-
-    return 0;
-}
-
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
 MarketPlaceManager::MarketPlaceManager(
     time_t _timer_period,
     time_t _monitor_period,
-    const string& _mad_location):
-        DriverManager(_mad_location),
-        timer_period(_timer_period),
-        monitor_period(_monitor_period),
-        imagem(0),
-        raftm(0)
+    const string& _mad_location)
+    : DriverManager(_mad_location)
+    , timer_thread(_timer_period, [this](){timer_action();})
+    , timer_period(_timer_period)
+    , monitor_period(_monitor_period)
 {
     Nebula& nd = Nebula::instance();

@@ -66,9 +43,7 @@ MarketPlaceManager::MarketPlaceManager(
     apppool = nd.get_apppool();
     dspool = nd.get_dspool();
     ipool = nd.get_ipool();
-
-    am.addListener(this);
-};
+}

@@ -122,11 +97,10 @@ void MarketPlaceManager::init_managers()
 
 int MarketPlaceManager::start()
 {
-    int rc;
-    pthread_attr_t pattr;
-
     using namespace std::placeholders; // for _1
 
+    NebulaLog::log("MKP",Log::INFO,"Starting Marketplace Manager...");
+
     register_action(MarketPlaceManagerMessages::UNDEFINED,
             &MarketPlaceManager::_undefined);

@@ -143,21 +117,14 @@ int MarketPlaceManager::start()
             &MarketPlaceManager::_log);
 
     string error;
 
     if ( DriverManager::start(error) != 0 )
     {
         NebulaLog::error("MKP", error);
         return -1;
     }
 
-    NebulaLog::log("MKP",Log::INFO,"Starting Marketplace Manager...");
-
-    pthread_attr_init(&pattr);
-    pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
-
-    rc = pthread_create(&marketm_thread, &pattr, marketplace_action_loop,
-            (void *) this);
-
-    return rc;
+    return 0;
 }

@@ -185,7 +152,7 @@ string MarketPlaceManager::format_message(
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void MarketPlaceManager::timer_action(const ActionRequest& ar)
+void MarketPlaceManager::timer_action()
 {
     static int mark = 0;
     static int tics = monitor_period - 5; //first monitor in 5 secs
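The new timer_thread member above replaces the per-manager pthread action loop: instead of pthread_create() on marketplace_action_loop(), the manager owns a small helper that calls timer_action() every timer_period seconds. A minimal sketch of such a helper, assuming roughly the shape of the Timer introduced in Listener.h (the name PeriodicTimer and every detail below are illustrative only, not the actual class):

#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>

// Runs 'work' every 'period' seconds on its own thread until stop() is called.
class PeriodicTimer
{
public:
    PeriodicTimer(double period, std::function<void()> work)
        : timer([this, period, work]{ loop(period, work); })
    {
    }

    ~PeriodicTimer()
    {
        stop();
    }

    void stop()
    {
        {
            std::lock_guard<std::mutex> lock(m);
            done = true;
        }

        cv.notify_all();

        if (timer.joinable())
        {
            timer.join();
        }
    }

private:
    void loop(double period, std::function<void()> work)
    {
        std::unique_lock<std::mutex> lock(m);

        auto tick = std::chrono::duration<double>(period);

        // wait_for returns false on timeout (time to run the action) and
        // true once stop() has set done.
        while (!cv.wait_for(lock, tick, [this]{ return done; }))
        {
            lock.unlock();

            work();

            lock.lock();
        }
    }

    bool done = false;

    std::mutex m;
    std::condition_variable cv;
    std::thread timer;
};

A manager would then hold such an object as a member constructed with (_timer_period, [this](){timer_action();}), exactly the pattern the initializer list above shows for the real Timer.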
@@ -845,16 +845,6 @@ void Nebula::start(bool bootstrap_only)
         throw;
     }
 
-    if (!cache)
-    {
-        rc = raftm->start();
-
-        if ( rc != 0 )
-        {
-            throw runtime_error("Could not start the Raft Consensus Manager");
-        }
-    }
-
     // ---- FedReplica Manager ----
     if (!cache)
     {

@@ -867,19 +857,12 @@ void Nebula::start(bool bootstrap_only)
             throw;
         }
 
-        rc = frm->start();
-
         if ( is_federation_master() && solo )
         {
             // Replica threads are started on master in solo mode.
             // HA start/stop the replica threads on leader/follower states
             frm->start_replica_threads();
         }
-
-        if ( rc != 0 )
-        {
-            throw runtime_error("Could not start the Federation Replica Manager");
-        }
     }
 
     // ---- Virtual Machine Manager ----

@@ -893,7 +876,6 @@ void Nebula::start(bool bootstrap_only)
     nebula_configuration->get("VM_MAD", vmm_mads);
 
     vmm = new VirtualMachineManager(
-        timer_period,
         vm_limit,
         mad_location);

@@ -1202,20 +1184,15 @@ void Nebula::start(bool bootstrap_only)
         marketm->finalize();
 
         ipamm->finalize();
-        frm->finalize();
 
         //sleep to wait drivers???
-        pthread_join(vmm->get_thread_id(),0);
-        pthread_join(lcm->get_thread_id(),0);
-        pthread_join(tm->get_thread_id(),0);
-        pthread_join(dm->get_thread_id(),0);
-
-        pthread_join(hm->get_thread_id(),0);
-        pthread_join(imagem->get_thread_id(),0);
-        pthread_join(marketm->get_thread_id(),0);
-        pthread_join(ipamm->get_thread_id(),0);
-        pthread_join(frm->get_thread_id(),0);
+        vmm->join_thread();
+        lcm->join_thread();
+        tm->join_thread();
+        dm->join_thread();
+
+        im->join_thread();
+        hm->join_thread();
+        ipamm->join_thread();
     }
 
     raftm->finalize();

@@ -1224,13 +1201,11 @@ void Nebula::start(bool bootstrap_only)
     rm->finalize();
     authm->finalize();
 
-    pthread_join(rm->get_thread_id(),0);
-    pthread_join(authm->get_thread_id(),0);
-    pthread_join(raftm->get_thread_id(),0);
+    authm->join_thread();
 
     if (is_federation_slave())
     {
-        pthread_join(aclm->get_thread_id(),0);
+        aclm->join_thread();
     }
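The shutdown block above keeps oned's two-phase idiom: every manager is asked to finalize() first, and only then is its event-loop thread joined, now through a join_thread() method instead of pthread_join() on an exposed thread id. A minimal sketch of that idiom with illustrative names (ManagerSketch is not a real class in the tree):

#include <thread>

struct ManagerSketch
{
    std::thread loop_thread;        // runs the manager's event loop

    void finalize()                 // ask the loop to exit (stubbed out here)
    {
    }

    void join_thread()              // wait until the loop has drained
    {
        if (loop_thread.joinable())
        {
            loop_thread.join();
        }
    }
};

void shutdown(ManagerSketch& vmm, ManagerSketch& lcm)
{
    // Phase 1: signal the managers to stop accepting work.
    vmm.finalize();
    lcm.finalize();

    // Phase 2: wait for their loops, mirroring vmm->join_thread(); lcm->join_thread();
    vmm.join_thread();
    lcm.join_thread();
}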
@@ -17,6 +17,7 @@
 #include "PoolObjectSQL.h"
 #include "PoolObjectAuth.h"
 #include "NebulaUtil.h"
+#include "SSLUtil.h"
 #include "Nebula.h"
 #include "Clusterable.h"
 #include "ClusterableSingle.h"

@@ -33,10 +33,7 @@ const time_t FedReplicaManager::xmlrpc_timeout_ms = 10000;
 
 FedReplicaManager::FedReplicaManager(LogDB * d): ReplicaManager(), logdb(d)
 {
-    pthread_mutex_init(&mutex, 0);
-
-    am.addListener(this);
-};
+}
 
 /* -------------------------------------------------------------------------- */

@@ -57,7 +54,7 @@ FedReplicaManager::~FedReplicaManager()
     {
         stop_replica_threads();
     }
-};
+}
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */

@@ -65,83 +62,28 @@ FedReplicaManager::~FedReplicaManager()
 uint64_t FedReplicaManager::apply_log_record(uint64_t index, uint64_t prev,
         const std::string& sql)
 {
-    uint64_t rc;
-
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     uint64_t last_index = logdb->last_federated();
 
     if ( prev != last_index )
     {
-        rc = last_index;
-
-        pthread_mutex_unlock(&mutex);
-
-        return rc;
+        return last_index;
     }
 
     std::ostringstream oss(sql);
 
     if ( logdb->exec_federated_wr(oss, index) != 0 )
     {
-        pthread_mutex_unlock(&mutex);
-
         return UINT64_MAX;
     }
 
-    pthread_mutex_unlock(&mutex);
-
     return 0;
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * frm_loop(void *arg)
-{
-    FedReplicaManager * fedrm;
-
-    if ( arg == 0 )
-    {
-        return 0;
-    }
-
-    fedrm = static_cast<FedReplicaManager *>(arg);
-
-    NebulaLog::log("FRM",Log::INFO,"Federation Replica Manger started.");
-
-    fedrm->am.loop();
-
-    NebulaLog::log("FRM",Log::INFO,"Federation Replica Manger stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-
-int FedReplicaManager::start()
-{
-    int rc;
-    pthread_attr_t pattr;
-
-    pthread_attr_init (&pattr);
-    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
-
-    NebulaLog::log("FRM",Log::INFO,"Starting Federation Replica Manager...");
-
-    rc = pthread_create(&frm_thread, &pattr, frm_loop,(void *) this);
-
-    return rc;
-}
-
-/* -------------------------------------------------------------------------- */
-
-void FedReplicaManager::finalize_action(const ActionRequest& ar)
-{
-    NebulaLog::log("FRM", Log::INFO, "Federation Replica Manager...");
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
 void FedReplicaManager::update_zones(std::vector<int>& zone_ids)
 {
     Nebula& nd = Nebula::instance();

@@ -156,7 +98,7 @@ void FedReplicaManager::update_zones(std::vector<int>& zone_ids)
         return;
     }
 
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     uint64_t last_index = logdb->last_federated();

@@ -192,8 +134,6 @@ void FedReplicaManager::update_zones(std::vector<int>& zone_ids)
             }
         }
     }
-
-    pthread_mutex_unlock(&mutex);
 }

@@ -219,7 +159,7 @@ void FedReplicaManager::add_zone(int zone_id)
 
     zone->unlock();
 
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     int last_index = logdb->last_federated();

@@ -232,8 +172,6 @@ void FedReplicaManager::add_zone(int zone_id)
     NebulaLog::log("FRM", Log::INFO, oss);
 
     add_replica_thread(zone_id);
-
-    pthread_mutex_unlock(&mutex);
 }

@@ -244,7 +182,7 @@ void FedReplicaManager::delete_zone(int zone_id)
 
     std::map<int, ZoneServers *>::iterator it;
 
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     it = zones.find(zone_id);

@@ -262,8 +200,6 @@ void FedReplicaManager::delete_zone(int zone_id)
     NebulaLog::log("FRM", Log::INFO, oss);
 
     delete_replica_thread(zone_id);
-
-    pthread_mutex_unlock(&mutex);
 };

@@ -280,13 +216,12 @@ ReplicaThread * FedReplicaManager::thread_factory(int zone_id)
 int FedReplicaManager::get_next_record(int zone_id, std::string& zedp,
         LogDBRecord& lr, std::string& error)
 {
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     std::map<int, ZoneServers *>::iterator it = zones.find(zone_id);
 
     if ( it == zones.end() )
     {
-        pthread_mutex_unlock(&mutex);
-
         return -1;
     }

@@ -308,7 +243,6 @@ int FedReplicaManager::get_next_record(int zone_id, std::string& zedp,
 
     if ( zs->next == UINT64_MAX ) //no new records
     {
-        pthread_mutex_unlock(&mutex);
-
         return -2;
     }

@@ -322,8 +256,6 @@ int FedReplicaManager::get_next_record(int zone_id, std::string& zedp,
 
         error = oss.str();
 
-        pthread_mutex_unlock(&mutex);
-
         return -1;
     }

@@ -339,8 +271,6 @@ int FedReplicaManager::get_next_record(int zone_id, std::string& zedp,
         error = oss.str();
     }
 
-    pthread_mutex_unlock(&mutex);
-
     return rc;
 }

@@ -349,13 +279,12 @@ int FedReplicaManager::get_next_record(int zone_id, std::string& zedp,
 
 void FedReplicaManager::replicate_success(int zone_id)
 {
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     std::map<int, ZoneServers *>::iterator it = zones.find(zone_id);
 
     if ( it == zones.end() )
     {
-        pthread_mutex_unlock(&mutex);
-
         return;
     }

@@ -369,15 +298,13 @@ void FedReplicaManager::replicate_success(int zone_id)
     {
         ReplicaManager::replicate(zone_id);
     }
-
-    pthread_mutex_unlock(&mutex);
 }
 
 /* -------------------------------------------------------------------------- */
 
 void FedReplicaManager::replicate_failure(int zone_id, uint64_t last_zone)
 {
-    pthread_mutex_lock(&mutex);
+    lock_guard<mutex> ul(fed_mutex);
 
     std::map<int, ZoneServers *>::iterator it = zones.find(zone_id);

@@ -397,8 +324,6 @@ void FedReplicaManager::replicate_failure(int zone_id, uint64_t last_zone)
             ReplicaManager::replicate(zone_id);
         }
     }
-
-    pthread_mutex_unlock(&mutex);
 }
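Every FedReplicaManager hunk above applies the same transformation: a pthread_mutex_lock()/pthread_mutex_unlock() pair becomes a scoped std::lock_guard on the new fed_mutex member, so early returns no longer need a manual unlock on each path. A reduced before/after sketch (the find_zone_* names are made up purely to show the pattern):

#include <mutex>
#include <pthread.h>

// Before: each exit path has to unlock by hand, which is easy to miss.
int find_zone_pthread(pthread_mutex_t * mutex, bool found)
{
    pthread_mutex_lock(mutex);

    if (!found)
    {
        pthread_mutex_unlock(mutex);

        return -1;
    }

    pthread_mutex_unlock(mutex);

    return 0;
}

std::mutex fed_mutex;

// After: the guard releases fed_mutex automatically on every return path.
int find_zone(bool found)
{
    std::lock_guard<std::mutex> ul(fed_mutex);

    if (!found)
    {
        return -1;
    }

    return 0;
}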
@@ -55,18 +55,21 @@ static unsigned int get_zone_servers(std::map<int, std::string>& _s);
 RaftManager::RaftManager(int id, const VectorAttribute * leader_hook_mad,
         const VectorAttribute * follower_hook_mad, time_t log_purge,
         long long bcast, long long elect, time_t xmlrpc,
-        const string& remotes_location):server_id(id), term(0), num_servers(0),
-        reconciling(false), commit(0), leader_hook(0), follower_hook(0)
+        const string& remotes_location)
+    : server_id(id)
+    , term(0)
+    , num_servers(0)
+    , reconciling(false)
+    , timer_thread(timer_period_ms / 1000.0, [this](){timer_action();})
+    , commit(0)
+    , leader_hook(0)
+    , follower_hook(0)
 {
     Nebula& nd = Nebula::instance();
     LogDB * logdb = nd.get_logdb();
 
     std::string raft_xml, cmd, arg;
 
-    pthread_mutex_init(&mutex, 0);
-
-    am.addListener(this);
-
     // -------------------------------------------------------------------------
     // Initialize Raft variables:
     // - state

@@ -182,55 +185,13 @@ RaftManager::RaftManager(int id, const VectorAttribute * leader_hook_mad,
     }
 };
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * raft_manager_loop(void *arg)
+void RaftManager::finalize()
 {
-    RaftManager * raftm;
-    struct timespec timeout;
-
-    if ( arg == 0 )
-    {
-        return 0;
-    }
-
-    raftm = static_cast<RaftManager *>(arg);
-
-    timeout.tv_sec = 0;
-    timeout.tv_nsec = raftm->timer_period_ms * 1000000;
-
-    NebulaLog::log("RCM",Log::INFO,"Raft Consensus Manager started.");
-
-    raftm->am.loop(timeout);
-
-    NebulaLog::log("RCM",Log::INFO,"Raft Consensus Manager stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-
-int RaftManager::start()
-{
-    int rc;
-    pthread_attr_t pattr;
-
-    pthread_attr_init (&pattr);
-    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
-
-    NebulaLog::log("RCM",Log::INFO,"Starting Raft Consensus Manager...");
-
-    rc = pthread_create(&raft_thread, &pattr, raft_manager_loop,(void *) this);
-
-    return rc;
-}
-
-/* -------------------------------------------------------------------------- */
-
-void RaftManager::finalize_action(const ActionRequest& ar)
-{
-    NebulaLog::log("RCM", Log::INFO, "Raft Consensus Manager...");
+    timer_thread.stop();
 
     if (is_leader())
     {

@@ -258,7 +219,7 @@ int RaftManager::get_leader_endpoint(std::string& endpoint)
 {
     int rc;
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     if ( leader_id == -1 )
     {

@@ -281,8 +242,6 @@ int RaftManager::get_leader_endpoint(std::string& endpoint)
         }
     }
 
-    pthread_mutex_unlock(&mutex);
-
     return rc;
 }

@@ -300,11 +259,10 @@ void RaftManager::add_server(int follower_id, const std::string& endpoint)
 
     logdb->get_last_record_index(log_index, log_term);
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     if ( state != LEADER )
     {
-        pthread_mutex_unlock(&mutex);
         return;
     }

@@ -324,9 +282,7 @@ void RaftManager::add_server(int follower_id, const std::string& endpoint)
     replica_manager.add_replica_thread(follower_id);
 
     heartbeat_manager.add_replica_thread(follower_id);
-
-    pthread_mutex_unlock(&mutex);
-};
+}

@@ -335,11 +291,10 @@ void RaftManager::delete_server(int follower_id)
     std::ostringstream oss;
     std::map<int, std::string> _servers;
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     if ( state != LEADER )
     {
-        pthread_mutex_unlock(&mutex);
         return;
     }

@@ -358,9 +313,7 @@ void RaftManager::delete_server(int follower_id)
     replica_manager.delete_replica_thread(follower_id);
 
     heartbeat_manager.delete_replica_thread(follower_id);
-
-    pthread_mutex_unlock(&mutex);
-};
+}

@@ -368,32 +321,6 @@ void RaftManager::delete_server(int follower_id)
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * reconciling_thread(void *arg)
-{
-    Nebula& nd = Nebula::instance();
-
-    LogDB * logdb = nd.get_logdb();
-    RaftManager * rm = nd.get_raftm();
-
-    uint64_t * index = static_cast<uint64_t *>(arg);
-
-    NebulaLog::log("RCM", Log::INFO, "Replicating log to followers");
-
-    logdb->replicate(*index);
-
-    NebulaLog::log("RCM", Log::INFO, "Leader log replicated");
-
-    pthread_mutex_lock(&(rm->mutex));
-
-    rm->reconciling = false;
-
-    pthread_mutex_unlock(&(rm->mutex));
-
-    free(index);
-
-    return 0;
-}
-
 void RaftManager::leader()
 {
     Nebula& nd = Nebula::instance();

@@ -414,11 +341,12 @@ void RaftManager::leader()
 
     logdb->setup_index(_applied, index);
 
-    pthread_mutex_lock(&mutex);
+    {
+        std::lock_guard<mutex> lock(raft_mutex);
 
     if ( state != CANDIDATE )
     {
-        pthread_mutex_unlock(&mutex);
         return;
     }

@@ -473,8 +401,7 @@ void RaftManager::leader()
     heartbeat_manager.replicate();
 
     clock_gettime(CLOCK_REALTIME, &last_heartbeat);
-
-    pthread_mutex_unlock(&mutex);
+    }
 
     aclm->reload_rules();

@@ -488,19 +415,23 @@ void RaftManager::leader()
 
     if ( _applied < index )
     {
-        pthread_attr_t pattr;
-        pthread_t thid;
-
-        pthread_attr_init (&pattr);
-        pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_DETACHED);
-
-        uint64_t * _index = (uint64_t *) malloc(sizeof(uint64_t));
-
-        *_index = _next_index;
-
-        pthread_create(&thid, &pattr, reconciling_thread, (void *) _index);
-
-        pthread_attr_destroy(&pattr);
+        std::thread t([this, _next_index] {
+            Nebula& nd = Nebula::instance();
+
+            LogDB * logdb = nd.get_logdb();
+
+            NebulaLog::log("RCM", Log::INFO, "Replicating log to followers");
+
+            logdb->replicate(_next_index);
+
+            NebulaLog::log("RCM", Log::INFO, "Leader log replicated");
+
+            std::lock_guard<mutex> lock(raft_mutex);
+
+            reconciling = false;
+        });
+
+        t.detach();
     }
 
     NebulaLog::log("RCM", Log::INFO, "oned is now the leader of the zone");

@@ -522,7 +453,8 @@ void RaftManager::follower(unsigned int _term)
 
     logdb->setup_index(lapplied, lindex);
 
-    pthread_mutex_lock(&mutex);
+    {
+        std::lock_guard<mutex> lock(raft_mutex);
 
     if ( state == LEADER && follower_hook != 0 )
     {

@@ -557,8 +489,7 @@ void RaftManager::follower(unsigned int _term)
     match.clear();
 
     requests.clear();
-
-    pthread_mutex_unlock(&mutex);
+    }
 
     if ( nd.is_federation_master() )
     {

@@ -576,7 +507,7 @@ void RaftManager::follower(unsigned int _term)
 
 void RaftManager::replicate_log(ReplicaRequest * request)
 {
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     if ( state != LEADER )
     {

@@ -584,7 +515,6 @@ void RaftManager::replicate_log(ReplicaRequest * request)
 
         requests.remove(request->index());
 
-        pthread_mutex_unlock(&mutex);
         return;
     }

@@ -622,8 +552,6 @@ void RaftManager::replicate_log(ReplicaRequest * request)
 
         requests.set(request->index(), request);
     }
-
-    pthread_mutex_unlock(&mutex);
 }

@@ -644,14 +572,13 @@ void RaftManager::replicate_success(int follower_id)
 
     logdb->get_last_record_index(db_lindex, db_lterm);
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     next_it = next.find(follower_id);
     match_it = match.find(follower_id);
 
     if ( next_it == next.end() || match_it == match.end() )
     {
-        pthread_mutex_unlock(&mutex);
-
         return;
     }

@@ -670,8 +597,6 @@ void RaftManager::replicate_success(int follower_id)
     {
         replica_manager.replicate(follower_id);
     }
-
-    pthread_mutex_unlock(&mutex);
 }

@@ -681,7 +606,7 @@ void RaftManager::replicate_failure(int follower_id)
 {
     std::map<int, uint64_t>::iterator next_it;
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     next_it = next.find(follower_id);

@@ -697,8 +622,6 @@ void RaftManager::replicate_failure(int follower_id)
     {
         replica_manager.replicate(follower_id);
     }
-
-    pthread_mutex_unlock(&mutex);
 }

@@ -709,13 +632,11 @@ void RaftManager::replicate_failure(int follower_id)
 
 void RaftManager::update_last_heartbeat(int _leader_id)
 {
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     leader_id = _leader_id;
 
     clock_gettime(CLOCK_REALTIME, &last_heartbeat);
-
-    pthread_mutex_unlock(&mutex);
 }

@@ -723,9 +644,7 @@ void RaftManager::update_last_heartbeat(int _leader_id)
 
 uint64_t RaftManager::update_commit(uint64_t leader_commit, uint64_t index)
 {
-    uint64_t _commit;
-
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     if ( leader_commit > commit )
     {

@@ -739,11 +658,7 @@ uint64_t RaftManager::update_commit(uint64_t leader_commit, uint64_t index)
         }
     }
 
-    _commit = commit;
-
-    pthread_mutex_unlock(&mutex);
-
-    return _commit;
+    return commit;
 }

@@ -756,12 +671,11 @@ int RaftManager::update_votedfor(int _votedfor)
 
     std::string raft_state_xml;
 
-    pthread_mutex_lock(&mutex);
+    {
+        std::lock_guard<mutex> lock(raft_mutex);
 
     if ( votedfor != -1 && votedfor != _votedfor )
     {
-        pthread_mutex_unlock(&mutex);
-
         return -1;
     }

@@ -770,8 +684,7 @@ int RaftManager::update_votedfor(int _votedfor)
     raft_state.replace("VOTEDFOR", votedfor);
 
     raft_state.to_xml(raft_state_xml);
-
-    pthread_mutex_unlock(&mutex);
+    }
 
     logdb->update_raft_state(raft_state_name, raft_state_xml);

@@ -781,7 +694,7 @@ int RaftManager::update_votedfor(int _votedfor)
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void RaftManager::timer_action(const ActionRequest& ar)
+void RaftManager::timer_action()
 {
     static int mark_tics = 0;
     static int purge_tics = 0;

@@ -824,7 +737,7 @@ void RaftManager::timer_action(const ActionRequest& ar)
 
     clock_gettime(CLOCK_REALTIME, &the_time);
 
-    pthread_mutex_lock(&mutex);
+    std::unique_lock<mutex> lock(raft_mutex);
 
     if ( state == LEADER ) // Send the heartbeat
     {

@@ -837,12 +750,6 @@ void RaftManager::timer_action(const ActionRequest& ar)
             heartbeat_manager.replicate();
 
             clock_gettime(CLOCK_REALTIME, &last_heartbeat);
-
-            pthread_mutex_unlock(&mutex);
-        }
-        else
-        {
-            pthread_mutex_unlock(&mutex);
         }
     }
     else if ( state == FOLLOWER )

@@ -858,18 +765,13 @@ void RaftManager::timer_action(const ActionRequest& ar)
 
             state = CANDIDATE;
 
-            pthread_mutex_unlock(&mutex);
+            lock.unlock();
 
             request_vote();
         }
-        else
-        {
-            pthread_mutex_unlock(&mutex);
-        }
     }
     else //SOLO or CANDIDATE, do nothing
     {
-        pthread_mutex_unlock(&mutex);
     }
 
     return;

@@ -916,11 +818,11 @@ void RaftManager::request_vote()
         /* ------------------------------------------------------------------ */
         /* Initialize election variables                                      */
         /* ------------------------------------------------------------------ */
-        pthread_mutex_lock(&mutex);
+        {
+            std::lock_guard<mutex> lock(raft_mutex);
 
         if ( state != CANDIDATE )
         {
-            pthread_mutex_unlock(&mutex);
            break;
        }

@@ -942,8 +844,7 @@ void RaftManager::request_vote()
 
         _term = term;
         _server_id = server_id;
-
-        pthread_mutex_unlock(&mutex);
+        }
 
         logdb->update_raft_state(raft_state_name, raft_state_xml);

@@ -1016,11 +917,11 @@ void RaftManager::request_vote()
         /* ------------------------------------------------------------------ */
        /* Timeout for a new election process (blocking timer thread)         */
         /* ------------------------------------------------------------------ */
-        pthread_mutex_lock(&mutex);
+        {
+            std::lock_guard<mutex> lock(raft_mutex);
 
         if ( state != CANDIDATE )
         {
-            pthread_mutex_unlock(&mutex);
            break;
        }

@@ -1029,8 +930,7 @@ void RaftManager::request_vote()
 
         raft_state.replace("VOTEDFOR", votedfor);
 
         raft_state.to_xml(raft_state_xml);
-
-        pthread_mutex_unlock(&mutex);
+        }
 
         logdb->update_raft_state(raft_state_name, raft_state_xml);

@@ -1068,14 +968,14 @@ int RaftManager::xmlrpc_replicate_log(int follower_id, LogDBRecord * lr,
 
     int xml_rc = 0;
 
-    pthread_mutex_lock(&mutex);
+    {
+        std::lock_guard<mutex> lock(raft_mutex);
 
     it = servers.find(follower_id);
 
     if ( it == servers.end() )
     {
         error = "Cannot find follower end point";
-        pthread_mutex_unlock(&mutex);
 
         return -1;
     }

@@ -1085,8 +985,7 @@ int RaftManager::xmlrpc_replicate_log(int follower_id, LogDBRecord * lr,
     _commit = commit;
     _term = term;
     _server_id = server_id;
-
-    pthread_mutex_unlock(&mutex);
+    }
 
     // -------------------------------------------------------------------------
     // Get parameters to call append entries on follower

@@ -1165,14 +1064,14 @@ int RaftManager::xmlrpc_request_vote(int follower_id, uint64_t lindex,
 
     int xml_rc = 0;
 
-    pthread_mutex_lock(&mutex);
+    {
+        std::lock_guard<mutex> lock(raft_mutex);
 
     it = servers.find(follower_id);
 
     if ( it == servers.end() )
     {
         error = "Cannot find follower end point";
-        pthread_mutex_unlock(&mutex);
 
         return -1;
     }

@@ -1181,8 +1080,7 @@ int RaftManager::xmlrpc_request_vote(int follower_id, uint64_t lindex,
 
     _term = term;
     _server_id = server_id;
-
-    pthread_mutex_unlock(&mutex);
+    }
 
     // -------------------------------------------------------------------------
     // Get parameters to call append entries on follower

@@ -1251,7 +1149,7 @@ std::string& RaftManager::to_xml(std::string& raft_xml)
 
     logdb->get_last_record_index(lindex, lterm);
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     oss << "<RAFT>"
         << "<SERVER_ID>" << server_id << "</SERVER_ID>"

@@ -1282,8 +1180,6 @@ std::string& RaftManager::to_xml(std::string& raft_xml)
 
     oss << "</RAFT>";
 
-    pthread_mutex_unlock(&mutex);
-
     raft_xml = oss.str();
 
     return raft_xml;

@@ -1303,7 +1199,7 @@ void RaftManager::reset_index(int follower_id)
 
     logdb->get_last_record_index(log_index, log_term);
 
-    pthread_mutex_lock(&mutex);
+    std::lock_guard<mutex> lock(raft_mutex);
 
     next_it = next.find(follower_id);

@@ -1311,8 +1207,6 @@ void RaftManager::reset_index(int follower_id)
     {
         next_it->second = log_index + 1;
     }
-
-    pthread_mutex_unlock(&mutex);
 }
 
 /* -------------------------------------------------------------------------- */
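The leader() hunk above also replaces the detached reconciling_thread() C function, whose argument had to be malloc'd and freed, with a detached std::thread whose lambda simply captures the index by value. A reduced sketch of the same move (replicate() stands in for the logdb->replicate() call; the surrounding names are illustrative):

#include <cstdint>
#include <mutex>
#include <thread>

std::mutex raft_mutex;
bool       reconciling = true;

void replicate(uint64_t index)     // stand-in for logdb->replicate(index)
{
    (void) index;
}

void start_reconciliation(uint64_t next_index)
{
    std::thread t([next_index] {
        // The index travels inside the closure; no malloc/free, no void* casts.
        replicate(next_index);

        std::lock_guard<std::mutex> lock(raft_mutex);
        reconciling = false;
    });

    t.detach();                    // fire and forget, as PTHREAD_CREATE_DETACHED did before
}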
@@ -451,7 +451,7 @@ void Request::execute(
 
     if (!event.empty())
     {
-        hm->trigger(HMAction::SEND_EVENT, event);
+        hm->trigger_send_event(event);
     }
 
     if ( log_method_call )
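The call-site change above (hm->trigger(HMAction::SEND_EVENT, event) becoming hm->trigger_send_event(event)) reflects the broader move away from tagged ActionRequest objects towards typed trigger methods. How the receiving manager implements that is not shown in this diff; a hypothetical sketch, assuming the new style queues std::function actions for the manager loop (EventListener and its members are illustrative, not the HookManager API):

#include <functional>
#include <queue>
#include <string>

class EventListener
{
public:
    // Typed entry point: callers no longer build a generic ActionRequest by hand.
    void trigger_send_event(const std::string& event)
    {
        trigger([this, event]{ send_event(event); });
    }

private:
    // Enqueue work for the manager loop; a real implementation would lock and notify.
    void trigger(std::function<void()> action)
    {
        pending.push(std::move(action));
    }

    void send_event(const std::string& event)
    {
        // Deliver the event to the hook subsystem (omitted in this sketch).
        (void) event;
    }

    std::queue<std::function<void()>> pending;
};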
@@ -93,36 +93,12 @@ RequestManager::RequestManager(
     Request::set_call_log_format(call_log_format);
 
     xmlrpc_limit_set(XMLRPC_XML_SIZE_LIMIT_ID, message_size);
-
-    am.addListener(this);
 };
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * rm_action_loop(void *arg)
-{
-    RequestManager * rm;
-
-    if ( arg == 0 )
-    {
-        return 0;
-    }
-
-    NebulaLog::log("ReM",Log::INFO,"Request Manager started.");
-
-    rm = static_cast<RequestManager *>(arg);
-
-    rm->am.loop();
-
-    NebulaLog::log("ReM",Log::INFO,"Request Manager stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
 /**
  *  Connection class is used to pass arguments to connection threads.
  */

@@ -320,7 +296,6 @@ int RequestManager::setup_socket()
 
 int RequestManager::start()
 {
-    pthread_attr_t pattr;
     ostringstream oss;
 
     NebulaLog::log("ReM",Log::INFO,"Starting Request Manager...");

@@ -334,11 +309,7 @@ int RequestManager::start()
 
     register_xml_methods();
 
-    pthread_attr_init (&pattr);
-    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
-
-    pthread_create(&rm_thread,&pattr,rm_action_loop,(void *)this);
-
+    pthread_attr_t pattr;
     pthread_attr_init(&pattr);
     pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);

@@ -1240,10 +1211,8 @@ void RequestManager::register_xml_methods()
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void RequestManager::finalize_action(const ActionRequest& ar)
+void RequestManager::finalize()
 {
-    NebulaLog::log("ReM",Log::INFO,"Stopping Request Manager...");
-
     pthread_cancel(rm_xml_server_thread);
 
     pthread_join(rm_xml_server_thread,0);
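With the ActionManager loop gone, RequestManager no longer receives a FINALIZE request through finalize_action(); oned calls finalize() directly and the method tears down the XML-RPC server thread itself, as the last hunk shows. The shape of that shutdown, reduced to a sketch (XmlServerSketch is an illustrative name, not the real class):

#include <pthread.h>

class XmlServerSketch
{
public:
    // Called directly by the shutdown path instead of via an ActionRequest.
    void finalize()
    {
        pthread_cancel(server_thread);

        pthread_join(server_thread, nullptr);
    }

private:
    pthread_t server_thread;       // the blocking xmlrpc-c server loop
};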
@@ -16,6 +17,7 @@
 
 #include "RequestManagerAcl.h"
 #include "AclManager.h"
+#include "AclRule.h"
 
 using namespace std;
@@ -67,7 +67,7 @@ void SecurityGroupCommit::request_execute(xmlrpc_c::paramList const& paramList,
 
     sg->unlock();
 
-    lcm->trigger(LCMAction::UPDATESG, oid, att);
+    lcm->trigger_updatesg(oid);
 
     success_response(oid, att);
@@ -17,6 +17,7 @@
 #include "RequestManagerSystem.h"
 #include "Nebula.h"
 #include "LogDB.h"
+#include "SSLUtil.h"
 
 using namespace std;
@@ -26,24 +26,31 @@
 #include "VirtualMachinePoolXML.h"
 #include "VirtualNetworkPoolXML.h"
 #include "SchedulerPolicy.h"
-#include "ActionManager.h"
+#include "Listener.h"
 #include "AclXML.h"
 #include "MonitorXML.h"
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * scheduler_action_loop(void *arg);
-
 class SchedulerTemplate;
 
 /**
  *  The Scheduler class. It represents the scheduler ...
  */
-class Scheduler: public ActionListener
+class Scheduler
 {
 public:
     void start();
 
+    void finalize()
+    {
+        if (timer_thread.get())
+        {
+            timer_thread->stop();
+        }
+    }
+
     virtual void register_policies(const SchedulerTemplate& conf){};
 
     static Scheduler& instance(Scheduler* the_sched=0)

@@ -66,18 +73,6 @@ public:
 protected:
 
     Scheduler():
-        acls(0),
-        upool(0),
-        hpool(0),
-        clpool(0),
-        dspool(0),
-        img_dspool(0),
-        vmpool(0),
-        vm_roles_pool(0),
-        vnetpool(0),
-        vmgpool(0),
-        vmapool(0),
-        hmonpool(0),
         timer(0),
         one_xmlrpc(""),
         machines_limit(0),

@@ -86,8 +81,7 @@ protected:
         mem_ds_scale(0),
         diff_vnets(false)
     {
-        am.addListener(this);
-    };
+    }
 
     virtual ~Scheduler()
     {

@@ -107,30 +101,30 @@ protected:
         delete vmgpool;
 
         delete acls;
-    };
+    }
 
     // ---------------------------------------------------------------
     // Pools
     // ---------------------------------------------------------------
-    AclXML * acls;
-    UserPoolXML * upool;
+    AclXML * acls = nullptr;
+    UserPoolXML * upool = nullptr;
 
-    HostPoolXML * hpool;
-    ClusterPoolXML * clpool;
+    HostPoolXML * hpool = nullptr;
+    ClusterPoolXML * clpool = nullptr;
 
-    SystemDatastorePoolXML * dspool;
-    ImageDatastorePoolXML * img_dspool;
+    SystemDatastorePoolXML * dspool = nullptr;
+    ImageDatastorePoolXML * img_dspool = nullptr;
 
-    VirtualMachinePoolXML * vmpool;
-    VirtualMachineRolePoolXML * vm_roles_pool;
+    VirtualMachinePoolXML * vmpool = nullptr;
+    VirtualMachineRolePoolXML * vm_roles_pool = nullptr;
 
-    VirtualNetworkPoolXML * vnetpool;
+    VirtualNetworkPoolXML * vnetpool = nullptr;
 
-    VMGroupPoolXML * vmgpool;
+    VMGroupPoolXML * vmgpool = nullptr;
 
-    VirtualMachineActionsPoolXML* vmapool;
+    VirtualMachineActionsPoolXML* vmapool = nullptr;
 
-    MonitorPoolXML * hmonpool;
+    MonitorPoolXML * hmonpool = nullptr;
 
     // ---------------------------------------------------------------
     // Scheduler Policies
     // ---------------------------------------------------------------

@@ -183,11 +177,9 @@ protected:
     virtual void do_vm_groups();
 
 private:
-    Scheduler(Scheduler const&){};
+    Scheduler(Scheduler const&) = delete;
 
-    Scheduler& operator=(Scheduler const&){return *this;};
+    Scheduler& operator=(Scheduler const&) = delete;
 
-    friend void * scheduler_action_loop(void *arg);
-
     // ---------------------------------------------------------------
     // Scheduling Policies
     // ---------------------------------------------------------------

@@ -244,19 +236,12 @@ private:
     // ---------------------------------------------------------------
     // Timer to periodically schedule and dispatch VMs
     // ---------------------------------------------------------------
+    std::unique_ptr<Timer> timer_thread;
 
-    pthread_t sched_thread;
-    ActionManager am;
-
     // -------------------------------------------------------------------------
     // Action Listener interface
     // -------------------------------------------------------------------------
-    void timer_action(const ActionRequest& ar);
-
-    void finalize_action(const ActionRequest& ar)
-    {
-        NebulaLog::log("SCHED",Log::INFO,"Stopping the scheduler...");
-    };
+    void timer_action();
 };
 
 #endif /*SCHEDULER_H_*/
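Two C++11 cleanups are visible in the Scheduler header above: the pool pointers pick up in-class default initializers instead of a long constructor initializer list, and the private copy operations become = delete. A condensed illustration of both (PoolHolder is a made-up name used only for the example):

class PoolHolder
{
public:
    PoolHolder() = default;                              // members already start as nullptr

    PoolHolder(PoolHolder const&) = delete;              // clearer than a private empty body
    PoolHolder& operator=(PoolHolder const&) = delete;

private:
    int * first_pool  = nullptr;   // in-class initializer replaces first_pool(0) in the ctor
    int * second_pool = nullptr;
};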
@@ -15,6 +15,7 @@
 /* -------------------------------------------------------------------------- */
 
 #include "AclXML.h"
+#include "AclRule.h"
 #include "ObjectXML.h"
 #include <vector>
@@ -77,33 +77,8 @@ static double profile(bool start, const string& message="")
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * scheduler_action_loop(void *arg)
-{
-    Scheduler * sched;
-
-    if ( arg == 0 )
-    {
-        return 0;
-    }
-
-    sched = static_cast<Scheduler *>(arg);
-
-    NebulaLog::log("SCHED",Log::INFO,"Scheduler loop started.");
-
-    sched->am.loop(sched->timer);
-
-    NebulaLog::log("SCHED",Log::INFO,"Scheduler loop stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-
 void Scheduler::start()
 {
-    int rc;
-
     ifstream  file;
     ostringstream oss;

@@ -111,8 +86,6 @@ void Scheduler::start()
 
     unsigned int live_rescheds;
 
-    pthread_attr_t pattr;
-
     // -----------------------------------------------------------
     // Configuration File
     // -----------------------------------------------------------

@@ -375,18 +348,7 @@ void Scheduler::start()
 
     NebulaLog::log("SCHED",Log::INFO,"Starting scheduler loop...");
 
-    pthread_attr_init (&pattr);
-    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
-
-    rc = pthread_create(&sched_thread,&pattr,scheduler_action_loop,(void *) this);
-
-    if ( rc != 0 )
-    {
-        NebulaLog::log("SCHED",Log::ERROR,
-            "Could not start scheduler loop, exiting");
-
-        return;
-    }
+    timer_thread.reset(new Timer(timer, [this](){timer_action();}));
 
     // -----------------------------------------------------------
     // Wait for a SIGTERM or SIGINT signal
     // -----------------------------------------------------------

@@ -399,9 +361,7 @@ void Scheduler::start()
 
     sigwait(&mask, &signal);
 
-    am.finalize();
-
-    pthread_join(sched_thread,0);
+    finalize();
 
     xmlCleanupParser();

@@ -1835,7 +1795,7 @@ void Scheduler::do_vm_groups()
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void Scheduler::timer_action(const ActionRequest& ar)
+void Scheduler::timer_action()
 {
     int rc;
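After creating the Timer, Scheduler::start() simply parks the main thread in sigwait() and calls finalize() when SIGTERM or SIGINT arrives, as the context lines above show. The signal-waiting part in isolation looks roughly like this (a standalone sketch, not the scheduler's exact code):

#include <csignal>
#include <pthread.h>

// Block SIGINT/SIGTERM in the calling thread and wait for one of them.
int wait_for_termination()
{
    sigset_t mask;
    int      signal;

    sigemptyset(&mask);

    sigaddset(&mask, SIGINT);
    sigaddset(&mask, SIGTERM);

    pthread_sigmask(SIG_BLOCK, &mask, nullptr);

    sigwait(&mask, &signal);       // returns once a blocked signal is delivered

    return signal;                 // the caller then runs finalize() and cleans up
}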
@@ -450,7 +450,7 @@ int SecurityGroup::post_update_template(string& error)
 
     commit(false);
 
-    Nebula::instance().get_lcm()->trigger(LCMAction::UPDATESG, oid);
+    Nebula::instance().get_lcm()->trigger_updatesg(oid);
 
     return 0;
 }
@@ -16,7 +16,7 @@
 
 #include "LogDB.h"
 #include "Nebula.h"
-#include "NebulaUtil.h"
+#include "SSLUtil.h"
 #include "ZoneServer.h"
 #include "Callbackable.h"
 #include "RaftManager.h"
@@ -33,33 +33,8 @@ const char * TransferManager::transfer_driver_name = "transfer_exe";
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-extern "C" void * tm_action_loop(void *arg)
-{
-    TransferManager * tm;
-
-    if ( arg == nullptr )
-    {
-        return 0;
-    }
-
-    tm = static_cast<TransferManager *>(arg);
-
-    NebulaLog::log("TrM",Log::INFO,"Transfer Manager started.");
-
-    tm->am.loop();
-
-    NebulaLog::log("TrM",Log::INFO,"Transfer Manager stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-
 int TransferManager::start()
 {
-    int rc;
-    pthread_attr_t pattr;
-
     using namespace std::placeholders; // for _1
 
     register_action(TransferManagerMessages::UNDEFINED,
@@ -80,262 +55,14 @@ int TransferManager::start()
 
     NebulaLog::log("TrM",Log::INFO,"Starting Transfer Manager...");
 
-    pthread_attr_init(&pattr);
-    pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
+    Listener::start();
 
-    rc = pthread_create(&tm_thread,&pattr,tm_action_loop,(void *) this);
+    return 0;
 
-    return rc;
 };
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::user_action(const ActionRequest& ar)
-{
-    const TMAction& tm_ar = static_cast<const TMAction& >(ar);
-    int vid = tm_ar.vm_id();
-
-    bool host_is_cloud = false;
-    bool vm_no_history = false;
-
-    Nebula& nd = Nebula::instance();
-
-    VirtualMachine * vm = vmpool->get(vid);
-
-    if (vm == nullptr)
-    {
-        return;
-    }
-
-    if (vm->hasHistory())
-    {
-        host_is_cloud = vm->get_host_is_cloud();
-    }
-    else
-    {
-        vm_no_history = true;
-    }
-
-    vm->unlock();
-
-    auto lcm = nd.get_lcm();
-
-    switch (tm_ar.action())
-    {
-    case TMAction::PROLOG:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::PROLOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::PROLOG_FAILURE,vid);
-        }
-        else
-        {
-            prolog_action(vid);
-        }
-        break;
-
-    case TMAction::PROLOG_MIGR:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::PROLOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::PROLOG_FAILURE,vid);
-        }
-        else
-        {
-            prolog_migr_action(vid);
-        }
-        break;
-
-    case TMAction::PROLOG_RESUME:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::PROLOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::PROLOG_FAILURE,vid);
-        }
-        else
-        {
-            prolog_resume_action(vid);
-        }
-        break;
-
-    case TMAction::PROLOG_ATTACH:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::ATTACH_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::ATTACH_FAILURE,vid);
-        }
-        else
-        {
-            prolog_attach_action(vid);
-        }
-        break;
-
-    case TMAction::EPILOG:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_action(false, vid);
-        }
-        break;
-
-    case TMAction::EPILOG_LOCAL:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_action(true, vid);
-        }
-        break;
-
-    case TMAction::EPILOG_STOP:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_stop_action(vid);
-        }
-        break;
-
-    case TMAction::EPILOG_DELETE:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_delete_action(vid);
-        }
-        break;
-
-    case TMAction::EPILOG_DELETE_STOP:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_delete_stop_action(vid);
-        }
-        break;
-
-    case TMAction::EPILOG_DELETE_PREVIOUS:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_delete_previous_action(vid);
-        }
-        break;
-
-    case TMAction::EPILOG_DELETE_BOTH:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::EPILOG_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::EPILOG_FAILURE,vid);
-        }
-        else
-        {
-            epilog_delete_both_action(vid);
-        }
-        break;
-
-    case TMAction::EPILOG_DETACH:
-        if (host_is_cloud)
-        {
-            lcm->trigger(LCMAction::DETACH_SUCCESS,vid);
-        }
-        else if (vm_no_history)
-        {
-            lcm->trigger(LCMAction::DETACH_FAILURE,vid);
-        }
-        else
-        {
-            epilog_detach_action(vid);
-        }
-        break;
-
-    case TMAction::CHECKPOINT:
-        checkpoint_action(vid);
-        break;
-
-    case TMAction::SAVEAS_HOT:
-        saveas_hot_action(vid);
-        break;
-
-    case TMAction::DRIVER_CANCEL:
-        driver_cancel_action(vid);
-        break;
-
-    case TMAction::SNAPSHOT_CREATE:
-        snapshot_create_action(vid);
-        break;
-
-    case TMAction::SNAPSHOT_REVERT:
-        snapshot_revert_action(vid);
-        break;
-
-    case TMAction::SNAPSHOT_DELETE:
-        snapshot_delete_action(vid);
-        break;
-
-    case TMAction::RESIZE:
-        resize_action(vid);
-        break;
-    }
-}
-
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
 
 int TransferManager::prolog_transfer_command(
     VirtualMachine * vm,
     const VirtualMachineDisk* disk,
@@ -571,9 +298,56 @@ int TransferManager::prolog_context_command(
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::prolog_action(int vid)
+/**
+ *  Check VM attributes and trigger a LCM event
+ *    - success for hybrid VMs
+ *    - failure if no history is present (consistency check)
+ *
+ *  @return 0 if the TM event needs to be triggered
+ */
+static int test_and_trigger(VirtualMachine * vm,
+        void (LifeCycleManager::*success)(int),
+        void (LifeCycleManager::*failure)(int))
 {
+    if (!vm->hasHistory())
+    {
+        auto lcm = Nebula::instance().get_lcm();
+
+        (lcm->*failure)(vm->get_oid());
+
+        return -1;
+    }
+
+    if (vm->get_host_is_cloud())
+    {
+        auto lcm = Nebula::instance().get_lcm();
+
+        (lcm->*success)(vm->get_oid());
+
+        return -1;
+    }
+
+    return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+void TransferManager::trigger_prolog(VirtualMachine * vm)
+{
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_prolog_success,
+                &LifeCycleManager::trigger_prolog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, vid] {
     ofstream xfr;
     ostringstream os("prolog, ");
     string xfr_name;
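All of the converted manager methods in this commit follow the shape shown above: the public trigger_*() entry point does its consistency checks and then hands a lambda to trigger(), which queues the work for the manager's event loop (started from start() and stopped from finalize()). The Listener base class itself lives in Listener.h and is not part of this hunk, so the sketch below is only an assumption about how such a work queue could look, not the actual implementation.

// Illustrative work-queue sketch for trigger()/loop()/finalize().
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

class Listener
{
public:
    // Queue an action; it runs later, in order, on the listener thread.
    void trigger(std::function<void()> action)
    {
        std::lock_guard<std::mutex> lock(mtx);

        pending.push(std::move(action));
        cond.notify_one();
    }

    // Event loop executed by the manager thread.
    void loop()
    {
        while (true)
        {
            std::function<void()> action;

            {
                std::unique_lock<std::mutex> lock(mtx);

                cond.wait(lock, [this] { return end || !pending.empty(); });

                if (end && pending.empty())
                {
                    return;
                }

                action = std::move(pending.front());
                pending.pop();
            }

            action(); // e.g. the body queued by trigger_prolog() above
        }
    }

    // Stop the loop once queued work has drained.
    void finalize()
    {
        std::lock_guard<std::mutex> lock(mtx);

        end = true;
        cond.notify_one();
    }

private:
    bool end = false;

    std::queue<std::function<void()>> pending;

    std::mutex mtx;
    std::condition_variable cond;
};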
@@ -723,18 +497,31 @@ error_attributes:
     goto error_common;
 
 error_common:
-    (nd.get_lcm())->trigger(LCMAction::PROLOG_FAILURE,vid);
+    nd.get_lcm()->trigger_prolog_failure(vid);
     vm->log("TrM", Log::ERROR, os);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::prolog_migr_action(int vid)
+void TransferManager::trigger_prolog_migr(VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_prolog_success,
+                &LifeCycleManager::trigger_prolog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, vid] {
     ofstream xfr;
     ostringstream os;
     string xfr_name;
@@ -862,18 +649,30 @@ error_file:
     goto error_common;
 
 error_common:
-    (Nebula::instance().get_lcm())->trigger(LCMAction::PROLOG_FAILURE,vid);
+    Nebula::instance().get_lcm()->trigger_prolog_failure(vid);
     vm->log("TrM", Log::ERROR, os);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::prolog_resume_action(int vid)
+void TransferManager::trigger_prolog_resume(VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_prolog_success,
+                &LifeCycleManager::trigger_prolog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+    trigger([this, vid] {
     ofstream xfr;
     ostringstream os;
     string xfr_name;
@@ -1005,19 +804,32 @@ error_file:
     goto error_common;
 
 error_common:
-    (nd.get_lcm())->trigger(LCMAction::PROLOG_FAILURE,vid);
+    nd.get_lcm()->trigger_prolog_failure(vid);
 
     vm->log("TrM", Log::ERROR, os);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::prolog_attach_action(int vid)
+void TransferManager::trigger_prolog_attach(VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_prolog_success,
+                &LifeCycleManager::trigger_prolog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, vid] {
     ofstream xfr;
     ostringstream os("prolog, ");
     string xfr_name;
@@ -1125,11 +937,12 @@ error_attributes:
     goto error_common;
 
 error_common:
-    (nd.get_lcm())->trigger(LCMAction::PROLOG_FAILURE,vid);
+    nd.get_lcm()->trigger_prolog_failure(vid);
     vm->log("TrM", Log::ERROR, os);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
@@ -1219,8 +1032,20 @@ void TransferManager::epilog_transfer_command(
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::epilog_action(bool local, int vid)
+void TransferManager::trigger_epilog(bool local, VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_epilog_success,
+                &LifeCycleManager::trigger_epilog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, local, vid] {
     ofstream xfr;
     ostringstream os;
 
|
|||||||
goto error_common;
|
goto error_common;
|
||||||
|
|
||||||
error_common:
|
error_common:
|
||||||
(nd.get_lcm())->trigger(LCMAction::EPILOG_FAILURE,vid);
|
nd.get_lcm()->trigger_epilog_failure(vid);
|
||||||
vm->log("TrM", Log::ERROR, os);
|
vm->log("TrM", Log::ERROR, os);
|
||||||
|
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
return;
|
return;
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
void TransferManager::epilog_stop_action(int vid)
|
void TransferManager::trigger_epilog_stop(VirtualMachine * vm)
|
||||||
{
|
{
|
||||||
|
int vid = vm->get_oid();
|
||||||
|
|
||||||
|
int rc = test_and_trigger(vm,
|
||||||
|
&LifeCycleManager::trigger_epilog_success,
|
||||||
|
&LifeCycleManager::trigger_epilog_failure);
|
||||||
|
|
||||||
|
if ( rc == -1 )
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
trigger([this, vid] {
|
||||||
ofstream xfr;
|
ofstream xfr;
|
||||||
ostringstream os;
|
ostringstream os;
|
||||||
|
|
||||||
@@ -1456,14 +1294,14 @@ error_file:
     goto error_common;
 
 error_common:
-    (nd.get_lcm())->trigger(LCMAction::EPILOG_FAILURE,vid);
+    nd.get_lcm()->trigger_epilog_failure(vid);
     vm->log("TrM", Log::ERROR, os);
 
     vm->unlock();
     return;
+    });
 }
 
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
@@ -1598,8 +1436,20 @@ error_common:
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::epilog_delete_action(bool local, int vid)
+void TransferManager::trigger_epilog_delete(bool local, VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_epilog_success,
+                &LifeCycleManager::trigger_epilog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, local, vid] {
     ostringstream os;
 
     ofstream xfr;
@@ -1666,17 +1516,30 @@ error_file:
 
 error_common:
     vm->log("TrM", Log::ERROR, os);
-    (nd.get_lcm())->trigger(LCMAction::EPILOG_FAILURE, vid);
+    (nd.get_lcm())->trigger_epilog_failure(vid);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::epilog_delete_previous_action(int vid)
+void TransferManager::trigger_epilog_delete_previous(VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_epilog_success,
+                &LifeCycleManager::trigger_epilog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, vid] {
     ostringstream os;
 
     ofstream xfr;
@@ -1743,17 +1606,30 @@ error_file:
 
 error_common:
     vm->log("TrM", Log::ERROR, os);
-    (nd.get_lcm())->trigger(LCMAction::EPILOG_FAILURE, vid);
+    (nd.get_lcm())->trigger_epilog_failure(vid);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::epilog_delete_both_action(int vid)
+void TransferManager::trigger_epilog_delete_both(VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_epilog_success,
+                &LifeCycleManager::trigger_epilog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, vid] {
     ostringstream os;
 
     ofstream xfr;
@@ -1821,17 +1697,30 @@ error_file:
 
 error_common:
     vm->log("TrM", Log::ERROR, os);
-    (nd.get_lcm())->trigger(LCMAction::EPILOG_FAILURE, vid);
+    (nd.get_lcm())->trigger_epilog_failure(vid);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::epilog_detach_action(int vid)
+void TransferManager::trigger_epilog_detach(VirtualMachine * vm)
 {
+    int vid = vm->get_oid();
+
+    int rc = test_and_trigger(vm,
+                &LifeCycleManager::trigger_epilog_success,
+                &LifeCycleManager::trigger_epilog_failure);
+
+    if ( rc == -1 )
+    {
+        return;
+    }
+
+    trigger([this, vid] {
     ofstream xfr;
     ostringstream os;
     string xfr_name;
@@ -1921,18 +1810,20 @@ error_disk:
     goto error_common;
 
 error_common:
-    (nd.get_lcm())->trigger(LCMAction::EPILOG_FAILURE,vid);
+    (nd.get_lcm())->trigger_epilog_failure(vid);
     vm->log("TrM", Log::ERROR, os);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::driver_cancel_action(int vid)
+void TransferManager::trigger_driver_cancel(int vid)
 {
+    trigger([this, vid] {
     // ------------------------------------------------------------------------
     // Get the Driver for this host
     // ------------------------------------------------------------------------
@@ -1958,14 +1849,13 @@ void TransferManager::driver_cancel_action(int vid)
 
     transfer_msg_t msg(TransferManagerMessages::DRIVER_CANCEL, "", vid, "");
     tm_md->write(msg);
+    });
 
-    return;
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::checkpoint_action(int vid)
+void TransferManager::trigger_checkpoint(int vid)
 {
 
 }
@@ -1973,8 +1863,9 @@ void TransferManager::checkpoint_action(int vid)
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::saveas_hot_action(int vid)
+void TransferManager::trigger_saveas_hot(int vid)
 {
+    trigger([this, vid] {
     int disk_id;
     int image_id;
     string src;
@@ -2083,10 +1974,11 @@ error_file:
 error_common:
     vm->log("TrM", Log::ERROR, os);
 
-    (nd.get_lcm())->trigger(LCMAction::SAVEAS_FAILURE, vid);
+    nd.get_lcm()->trigger_saveas_failure(vid);
 
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
@@ -2228,26 +2120,41 @@ error_file:
 error_common:
     vm->log("TrM", Log::ERROR, os);
 
-    (nd.get_lcm())->trigger(LCMAction::DISK_SNAPSHOT_FAILURE, vid);
+    nd.get_lcm()->trigger_disk_snapshot_failure(vid);
 
     vm->unlock();
     return;
 }
 
-void TransferManager::snapshot_create_action(int vid)
-{
-    return do_snapshot_action(vid, "SNAP_CREATE");
-};
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
 
-void TransferManager::snapshot_revert_action(int vid)
+void TransferManager::trigger_snapshot_create(int vid)
 {
-    return do_snapshot_action(vid, "SNAP_REVERT");
-};
+    trigger([this, vid] {
+        do_snapshot_action(vid, "SNAP_CREATE");
+    });
+}
 
-void TransferManager::snapshot_delete_action(int vid)
-{
-    return do_snapshot_action(vid, "SNAP_DELETE");
-};
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+void TransferManager::trigger_snapshot_revert(int vid)
+{
+    trigger([this, vid] {
+        do_snapshot_action(vid, "SNAP_REVERT");
+    });
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+void TransferManager::trigger_snapshot_delete(int vid)
+{
+    trigger([this, vid] {
+        do_snapshot_action(vid, "SNAP_DELETE");
+    });
+}
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
@@ -2290,8 +2197,9 @@ void TransferManager::resize_command(VirtualMachine * vm,
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void TransferManager::resize_action(int vid)
+void TransferManager::trigger_resize(int vid)
 {
+    trigger([this, vid] {
     ostringstream os;
 
     ofstream xfr;
@@ -2368,10 +2276,11 @@ error_disk:
 error_common:
     vm->log("TrM", Log::ERROR, os);
 
-    (nd.get_lcm())->trigger(LCMAction::DISK_RESIZE_FAILURE, vid);
+    nd.get_lcm()->trigger_disk_resize_failure(vid);
 
     vm->unlock();
     return;
+    });
 }
 
 /* ************************************************************************** */
@@ -44,8 +44,6 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
 
     LifeCycleManager * lcm = Nebula::instance().get_lcm();
 
-    LCMAction::Actions lcm_action;
-
     int id = msg->oid();
 
     // Get the VM from the pool
@@ -77,28 +75,28 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
     case VirtualMachine::PROLOG_MIGRATE_POWEROFF:
     case VirtualMachine::PROLOG_MIGRATE_SUSPEND:
     case VirtualMachine::PROLOG_MIGRATE_UNKNOWN:
-        lcm_action = LCMAction::PROLOG_SUCCESS;
+        lcm->trigger_prolog_success(id);
         break;
 
     case VirtualMachine::EPILOG:
     case VirtualMachine::EPILOG_STOP:
     case VirtualMachine::EPILOG_UNDEPLOY:
     case VirtualMachine::CLEANUP_RESUBMIT:
-        lcm_action = LCMAction::EPILOG_SUCCESS;
+        lcm->trigger_epilog_success(id);
         break;
 
     case VirtualMachine::HOTPLUG_SAVEAS:
     case VirtualMachine::HOTPLUG_SAVEAS_POWEROFF:
     case VirtualMachine::HOTPLUG_SAVEAS_SUSPENDED:
-        lcm_action = LCMAction::SAVEAS_SUCCESS;
+        lcm->trigger_saveas_success(id);
         break;
 
     case VirtualMachine::HOTPLUG_PROLOG_POWEROFF:
-        lcm_action = LCMAction::ATTACH_SUCCESS;
+        lcm->trigger_attach_success(id);
         break;
 
     case VirtualMachine::HOTPLUG_EPILOG_POWEROFF:
-        lcm_action = LCMAction::DETACH_SUCCESS;
+        lcm->trigger_detach_success(id);
         break;
 
     case VirtualMachine::DISK_SNAPSHOT_POWEROFF:
@@ -108,12 +106,12 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
     case VirtualMachine::DISK_SNAPSHOT_REVERT_SUSPENDED:
    case VirtualMachine::DISK_SNAPSHOT_DELETE_SUSPENDED:
     case VirtualMachine::DISK_SNAPSHOT_DELETE:
-        lcm_action = LCMAction::DISK_SNAPSHOT_SUCCESS;
+        lcm->trigger_disk_snapshot_success(id);
         break;
 
     case VirtualMachine::DISK_RESIZE_POWEROFF:
     case VirtualMachine::DISK_RESIZE_UNDEPLOYED:
-        lcm_action = LCMAction::DISK_RESIZE_SUCCESS;
+        lcm->trigger_disk_resize_success(id);
         break;
 
     default:
@@ -146,28 +144,28 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
     case VirtualMachine::PROLOG_MIGRATE_POWEROFF:
     case VirtualMachine::PROLOG_MIGRATE_SUSPEND:
     case VirtualMachine::PROLOG_MIGRATE_UNKNOWN:
-        lcm_action = LCMAction::PROLOG_FAILURE;
+        lcm->trigger_prolog_failure(id);
         break;
 
     case VirtualMachine::EPILOG:
     case VirtualMachine::EPILOG_STOP:
     case VirtualMachine::EPILOG_UNDEPLOY:
     case VirtualMachine::CLEANUP_RESUBMIT:
-        lcm_action = LCMAction::EPILOG_FAILURE;
+        lcm->trigger_epilog_failure(id);
         break;
 
     case VirtualMachine::HOTPLUG_SAVEAS:
     case VirtualMachine::HOTPLUG_SAVEAS_POWEROFF:
     case VirtualMachine::HOTPLUG_SAVEAS_SUSPENDED:
-        lcm_action = LCMAction::SAVEAS_FAILURE;
+        lcm->trigger_saveas_failure(id);
         break;
 
     case VirtualMachine::HOTPLUG_PROLOG_POWEROFF:
-        lcm_action = LCMAction::ATTACH_FAILURE;
+        lcm->trigger_attach_failure(id);
         break;
 
     case VirtualMachine::HOTPLUG_EPILOG_POWEROFF:
-        lcm_action = LCMAction::DETACH_FAILURE;
+        lcm->trigger_detach_failure(id);
         break;
 
     case VirtualMachine::DISK_SNAPSHOT_POWEROFF:
@@ -177,12 +175,12 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
     case VirtualMachine::DISK_SNAPSHOT_REVERT_SUSPENDED:
     case VirtualMachine::DISK_SNAPSHOT_DELETE_SUSPENDED:
     case VirtualMachine::DISK_SNAPSHOT_DELETE:
-        lcm_action = LCMAction::DISK_SNAPSHOT_FAILURE;
+        lcm->trigger_disk_snapshot_failure(id);
         break;
 
     case VirtualMachine::DISK_RESIZE_POWEROFF:
     case VirtualMachine::DISK_RESIZE_UNDEPLOYED:
-        lcm_action = LCMAction::DISK_RESIZE_FAILURE;
+        lcm->trigger_disk_resize_failure(id);
         break;
 
     default:
@@ -190,8 +188,6 @@ void TransferManager::_transfer(unique_ptr<transfer_msg_t> msg)
     }
     }
 
-    lcm->trigger(lcm_action, id);
-
     vm->unlock();
 
     return;
@@ -778,7 +778,7 @@ bool UserPool::authenticate_internal(User * user,
     {
         ar.add_authenticate(auth_driver,username,password,token);
 
-        authm->trigger(AMAction::AUTHENTICATE,&ar);
+        authm->trigger_authenticate(ar);
 
         ar.wait();
 
@@ -1134,7 +1134,7 @@ bool UserPool::authenticate_server(User * user,
                         server_password,
                         second_token);
 
-    authm->trigger(AMAction::AUTHENTICATE,&ar);
+    authm->trigger_authenticate(ar);
     ar.wait();
 
     if (ar.result!=true) //User was not authenticated
@@ -1239,7 +1239,7 @@ bool UserPool::authenticate_external(const string& username,
 
     ar.add_authenticate(default_auth, username,"-",token);
 
-    authm->trigger(AMAction::AUTHENTICATE, &ar);
+    authm->trigger_authenticate(ar);
     ar.wait();
 
     if (ar.result != true) //User was not authenticated
@@ -1416,7 +1416,7 @@ int UserPool::authorize(AuthRequest& ar)
     }
     else
     {
-        authm->trigger(AMAction::AUTHORIZE,&ar);
+        authm->trigger_authorize(ar);
         ar.wait();
 
         if (ar.result==true)
@@ -17,6 +17,7 @@
 #include "Vdc.h"
 #include "Nebula.h"
 #include "AclManager.h"
+#include "AclRule.h"
 
 using namespace std;
 
@@ -1065,7 +1065,7 @@ int VirtualMachine::insert(SqlDB * db, string& error_str)
     //-------------------------------------------------------------------------
     if ( state == VirtualMachine::CLONING )
     {
-        Nebula::instance().get_lcm()->trigger(LCMAction::DISK_LOCK_SUCCESS,oid);
+        Nebula::instance().get_lcm()->trigger_disk_lock_success(oid);
     }
 
     return 0;
@@ -65,7 +65,7 @@ int VirtualMachinePool::update(PoolObjectSQL * objsql)
     {
         std::string event = HookStateVM::format_message(vm);
 
-        Nebula::instance().get_hm()->trigger(HMAction::SEND_EVENT, event);
+        Nebula::instance().get_hm()->trigger_send_event(event);
     }
 
     vm->set_prev_state();
@@ -203,7 +203,7 @@ int VirtualMachinePool::allocate(
     {
         std::string event = HookStateVM::format_message(vm);
 
-        Nebula::instance().get_hm()->trigger(HMAction::SEND_EVENT, event);
+        Nebula::instance().get_hm()->trigger_send_event(event);
 
         vm->unlock();
     }
@@ -36,11 +36,10 @@ using namespace std;
 /* ************************************************************************** */
 
 VirtualMachineManager::VirtualMachineManager(
-    time_t _timer_period,
     int _vm_limit,
     const string& _mad_location):
         DriverManager(_mad_location),
-        timer_period(_timer_period),
+        Listener("Virtual Machine Manager"),
         vm_limit(_vm_limit)
 {
     Nebula& nd = Nebula::instance();
@@ -48,40 +47,14 @@ VirtualMachineManager::VirtualMachineManager(
     vmpool  = nd.get_vmpool();
     hpool   = nd.get_hpool();
     ds_pool = nd.get_dspool();
-
-    am.addListener(this);
-};
+}
 
 /* ************************************************************************** */
 /* Manager start function                                                     */
 /* ************************************************************************** */
 
-extern "C" void * vmm_action_loop(void *arg)
-{
-    VirtualMachineManager * vmm;
-
-    if ( arg == nullptr )
-    {
-        return 0;
-    }
-
-    vmm = static_cast<VirtualMachineManager *>(arg);
-
-    NebulaLog::log("VMM",Log::INFO,"Virtual Machine Manager started.");
-
-    vmm->am.loop(vmm->timer_period);
-
-    NebulaLog::log("VMM",Log::INFO,"Virtual Machine Manager stopped.");
-
-    return 0;
-}
-
-/* -------------------------------------------------------------------------- */
-
 int VirtualMachineManager::start()
 {
-    pthread_attr_t pattr;
-
     using namespace std::placeholders; // for _1
 
     register_action(VMManagerMessages::UNDEFINED,
@@ -161,95 +134,9 @@ int VirtualMachineManager::start()
 
     NebulaLog::log("VMM",Log::INFO,"Starting Virtual Machine Manager...");
 
-    pthread_attr_init(&pattr);
-    pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
+    Listener::start();
 
-    int rc = pthread_create(&vmm_thread,&pattr,vmm_action_loop,(void *) this);
+    return 0;
 
-    return rc;
-};
-
-/* ************************************************************************** */
-/* Manager Action Interface                                                   */
-/* ************************************************************************** */
-
-void VirtualMachineManager::user_action(const ActionRequest& ar)
-{
-    const VMMAction& vmm_ar = static_cast<const VMMAction& >(ar);
-    int vid = vmm_ar.vm_id();
-
-    switch (vmm_ar.action())
-    {
-    case VMMAction::DEPLOY:
-        deploy_action(vid);
-        break;
-    case VMMAction::SAVE:
-        save_action(vid);
-        break;
-    case VMMAction::RESTORE:
-        restore_action(vid);
-        break;
-    case VMMAction::REBOOT:
-        reboot_action(vid);
-        break;
-    case VMMAction::RESET:
-        reset_action(vid);
-        break;
-    case VMMAction::SHUTDOWN:
-        shutdown_action(vid);
-        break;
-    case VMMAction::CANCEL:
-        cancel_action(vid);
-        break;
-    case VMMAction::CANCEL_PREVIOUS:
-        cancel_previous_action(vid);
-        break;
-    case VMMAction::CLEANUP:
-        cleanup_action(vid, false);
-        break;
-    case VMMAction::CLEANUP_BOTH:
-        cleanup_action(vid, true);
-        break;
-    case VMMAction::CLEANUP_PREVIOUS:
-        cleanup_previous_action(vid);
-        break;
-    case VMMAction::MIGRATE:
-        migrate_action(vid);
-        break;
-    case VMMAction::DRIVER_CANCEL:
-        driver_cancel_action(vid);
-        break;
-    case VMMAction::ATTACH:
-        attach_action(vid);
-        break;
-    case VMMAction::DETACH:
-        detach_action(vid);
-        break;
-    case VMMAction::ATTACH_NIC:
-        attach_nic_action(vid);
-        break;
-    case VMMAction::DETACH_NIC:
-        detach_nic_action(vid);
-        break;
-    case VMMAction::SNAPSHOT_CREATE:
-        snapshot_create_action(vid);
-        break;
-    case VMMAction::SNAPSHOT_REVERT:
-        snapshot_revert_action(vid);
-        break;
-    case VMMAction::SNAPSHOT_DELETE:
-        snapshot_delete_action(vid);
-        break;
-    case VMMAction::DISK_SNAPSHOT_CREATE:
-        disk_snapshot_create_action(vid);
-        break;
-    case VMMAction::DISK_RESIZE:
-        disk_resize_action(vid);
-        break;
-    case VMMAction::UPDATE_CONF:
-        update_conf_action(vid);
-        break;
-    }
 }
 
 /* ************************************************************************** */
@@ -396,6 +283,9 @@ string VirtualMachineManager::format_message(
     return base64;
 }
 
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
 static int do_context_command(VirtualMachine * vm, const string& password,
         string& prolog_cmd, string& disk_path)
 {
@@ -438,8 +328,9 @@ static int do_context_command(VirtualMachine * vm, const string& password,
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::deploy_action(int vid)
+void VirtualMachineManager::trigger_deploy(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     const VirtualMachineManagerDriver * vmd;
     int rc;
@@ -544,22 +435,22 @@ error_no_tm_command:
     os << "Cannot set context disk to update it for VM " << vm->get_oid();
 
 error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();
 
-    lcm->trigger(LCMAction::DEPLOY_FAILURE, vid);
+    lcm->trigger_deploy_failure(vid);
 
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::save_action(
-    int vid)
+void VirtualMachineManager::trigger_save(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     const VirtualMachineManagerDriver * vmd;
 
@@ -643,22 +534,22 @@ error_previous_history:
     os << "save_action, VM has no previous history";
 
 error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();
 
-    lcm->trigger(LCMAction::SAVE_FAILURE, vid);
+    lcm->trigger_save_failure(vid);
 
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::shutdown_action(
-    int vid)
+void VirtualMachineManager::trigger_shutdown(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     const VirtualMachineManagerDriver * vmd;
 
@@ -740,22 +631,22 @@ error_driver:
     os << "shutdown_action, error getting driver " << vm->get_vmm_mad();
 
 error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();
 
-    lcm->trigger(LCMAction::SHUTDOWN_FAILURE, vid);
+    lcm->trigger_shutdown_failure(vid);
 
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::reboot_action(
-    int vid)
+void VirtualMachineManager::trigger_reboot(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     const VirtualMachineManagerDriver * vmd;
 
@@ -816,14 +707,15 @@ error_common:
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::reset_action(
-    int vid)
+void VirtualMachineManager::trigger_reset(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     const VirtualMachineManagerDriver * vmd;
 
@@ -884,14 +776,15 @@ error_common:
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::cancel_action(
-    int vid)
+void VirtualMachineManager::trigger_cancel(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     ostringstream os;
 
@@ -973,22 +866,22 @@ error_driver:
     os << "cancel_action, error getting driver " << vm->get_vmm_mad();
 
 error_common://LCMAction::cancel_failure_action will check state
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();
 
-    lcm->trigger(LCMAction::CANCEL_FAILURE, vid);
+    lcm->trigger_shutdown_failure(vid);
 
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::cancel_previous_action(
-    int vid)
+void VirtualMachineManager::trigger_cancel_previous(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     ostringstream os;
 
@@ -1050,14 +943,15 @@ error_common:
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::cleanup_action(
-    int vid, bool cancel_previous)
+void VirtualMachineManager::trigger_cleanup(int vid, bool cancel_previous)
 {
+    trigger([this, vid, cancel_previous] {
     VirtualMachine * vm;
     ostringstream os;
 
@@ -1140,18 +1034,20 @@ error_epligo_command:
     os << "cleanup_action canceled";
 
 error_common:
-    (nd.get_lcm())->trigger(LCMAction::CLEANUP_FAILURE, vid);
+    nd.get_lcm()->trigger_cleanup_callback(vid);
 
     vm->log("VMM", Log::ERROR, os);
     vm->unlock();
     return;
+    });
 }
 
 /* -------------------------------------------------------------------------- */
 /* -------------------------------------------------------------------------- */
 
-void VirtualMachineManager::cleanup_previous_action(int vid)
+void VirtualMachineManager::trigger_cleanup_previous(int vid)
 {
+    trigger([this, vid] {
     VirtualMachine * vm;
     ostringstream os;
 
@ -1224,19 +1120,20 @@ error_epilog_command:
|
|||||||
os << "cleanup_action canceled";
|
os << "cleanup_action canceled";
|
||||||
|
|
||||||
error_common:
|
error_common:
|
||||||
(nd.get_lcm())->trigger(LCMAction::CLEANUP_FAILURE, vid);
|
nd.get_lcm()->trigger_cleanup_callback(vid);
|
||||||
|
|
||||||
vm->log("VMM", Log::ERROR, os);
|
vm->log("VMM", Log::ERROR, os);
|
||||||
vm->unlock();
|
vm->unlock();
|
||||||
return;
|
return;
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
-void VirtualMachineManager::migrate_action(
-    int vid)
+void VirtualMachineManager::trigger_migrate(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1305,22 +1202,22 @@ error_previous_history:
    os << "migrate_action, error VM has no previous history";

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::DEPLOY_FAILURE, vid);
+    lcm->trigger_deploy_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::restore_action(
-    int vid)
+void VirtualMachineManager::trigger_restore(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1407,22 +1304,22 @@ error_driver:
    os << "restore_action, error getting driver " << vm->get_vmm_mad();

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::DEPLOY_FAILURE, vid);
+    lcm->trigger_deploy_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

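Every error path above now calls a purpose-built method such as lcm->trigger_deploy_failure(vid) instead of posting an LCMAction code to a generic trigger(). The sketch below contrasts the two dispatch styles; the enum values and switch body are illustrative stand-ins, not the removed ActionManager code.

    // Old style (illustrative): one generic entry point plus a central switch.
    enum class LCMActionKind { DEPLOY_FAILURE, SHUTDOWN_FAILURE };

    void dispatch_old_style(LCMActionKind action, int vid)
    {
        switch (action)
        {
            case LCMActionKind::DEPLOY_FAILURE:   /* handle deploy failure   */ break;
            case LCMActionKind::SHUTDOWN_FAILURE: /* handle shutdown failure */ break;
            // every new action needs another enum value and another case
        }
    }

    // New style: each action is a named method, so the call site is type
    // checked and no central switch is required:
    //
    //     lcm->trigger_deploy_failure(vid);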
-void VirtualMachineManager::driver_cancel_action(
-    int vid)
+void VirtualMachineManager::trigger_driver_cancel(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    ostringstream os;

@ -1466,14 +1363,15 @@ error_common:
    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::attach_action(
-    int vid)
+void VirtualMachineManager::trigger_attach(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1599,22 +1497,22 @@ error_no_tm_command:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::ATTACH_FAILURE, vid);
+    lcm->trigger_attach_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::detach_action(
-    int vid)
+void VirtualMachineManager::trigger_detach(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1713,21 +1611,22 @@ error_driver:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::DETACH_FAILURE, vid);
+    lcm->trigger_detach_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::snapshot_create_action(int vid)
+void VirtualMachineManager::trigger_snapshot_create(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1788,22 +1687,22 @@ error_driver:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::SNAPSHOT_CREATE_FAILURE, vid);
+    lcm->trigger_snapshot_create_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::snapshot_revert_action(int vid)
+void VirtualMachineManager::trigger_snapshot_revert(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1864,22 +1763,22 @@ error_driver:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::SNAPSHOT_REVERT_FAILURE, vid);
+    lcm->trigger_snapshot_revert_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::snapshot_delete_action(int vid)
+void VirtualMachineManager::trigger_snapshot_delete(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -1940,22 +1839,22 @@ error_driver:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::SNAPSHOT_DELETE_FAILURE, vid);
+    lcm->trigger_snapshot_delete_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::disk_snapshot_create_action(int vid)
+void VirtualMachineManager::trigger_disk_snapshot_create(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -2053,21 +1952,22 @@ error_no_tm_command:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::DISK_SNAPSHOT_FAILURE, vid);
+    lcm->trigger_disk_snapshot_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::disk_resize_action(int vid)
+void VirtualMachineManager::trigger_disk_resize(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -2163,21 +2063,22 @@ error_no_tm_command:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::DISK_SNAPSHOT_FAILURE, vid);
+    lcm->trigger_disk_snapshot_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::update_conf_action(int vid)
+void VirtualMachineManager::trigger_update_conf(int vid)
{
+    trigger([this, vid] {
    ostringstream os;

    string vm_tmpl;

@ -2244,22 +2145,22 @@ void VirtualMachineManager::update_conf_action(int vid)
    return;

error:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::UPDATE_CONF_FAILURE, vid);
+    lcm->trigger_update_conf_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::attach_nic_action(
-    int vid)
+void VirtualMachineManager::trigger_attach_nic(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -2350,22 +2251,22 @@ error_no_tm_command:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::ATTACH_NIC_FAILURE, vid);
+    lcm->trigger_attach_nic_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

-void VirtualMachineManager::detach_nic_action(
-    int vid)
+void VirtualMachineManager::trigger_detach_nic(int vid)
{
+    trigger([this, vid] {
    VirtualMachine * vm;
    const VirtualMachineManagerDriver * vmd;

@ -2453,14 +2354,14 @@ error_no_tm_command:
    goto error_common;

error_common:
-    Nebula &ne = Nebula::instance();
-    LifeCycleManager * lcm = ne.get_lcm();
+    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(LCMAction::DETACH_NIC_FAILURE, vid);
+    lcm->trigger_detach_nic_failure(vid);

    vm->log("VMM", Log::ERROR, os);
    vm->unlock();
    return;
+    });
}

/* -------------------------------------------------------------------------- */

@ -131,7 +131,7 @@ void VirtualMachineManager::_deploy(unique_ptr<vm_msg_t> msg)
        return;
    }

-    LCMAction::Actions action = LCMAction::DEPLOY_SUCCESS;
+    void (LifeCycleManager::*action)(int) = &LifeCycleManager::trigger_deploy_success;

    if (msg->status() == "SUCCESS")
    {
@ -149,7 +149,7 @@ void VirtualMachineManager::_deploy(unique_ptr<vm_msg_t> msg)
        }
        else
        {
-            action = LCMAction::DEPLOY_FAILURE;
+            action = &LifeCycleManager::trigger_deploy_failure;
            log_error(vm, msg->payload(), "Empty deploy ID for virtual machine");
        }

@ -159,13 +159,13 @@ void VirtualMachineManager::_deploy(unique_ptr<vm_msg_t> msg)
    }
    else
    {
-        action = LCMAction::DEPLOY_FAILURE;
+        action = &LifeCycleManager::trigger_deploy_failure;
        log_error(id, msg->payload(), "Error deploying virtual machine");
    }

    LifeCycleManager * lcm = Nebula::instance().get_lcm();

-    lcm->trigger(action, msg->oid());
+    (lcm->*action)(msg->oid());
}

/* -------------------------------------------------------------------------- */
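The _deploy handler now selects its success or failure handler through a pointer to member function instead of an enum value. The standalone example below demonstrates the (object->*pointer)(argument) call syntax used here; the Lcm struct is a stand-in for LifeCycleManager, not the real class.

    // Pointer-to-member-function dispatch, the same mechanism the refactored
    // _deploy handler uses.
    #include <iostream>

    struct Lcm
    {
        void trigger_deploy_success(int id) { std::cout << "success " << id << "\n"; }
        void trigger_deploy_failure(int id) { std::cout << "failure " << id << "\n"; }
    };

    int main()
    {
        Lcm lcm;

        // Select the handler up front, as the diff does...
        void (Lcm::*action)(int) = &Lcm::trigger_deploy_success;

        bool deploy_ok = false;        // pretend the driver reported an error

        if (!deploy_ok)
        {
            action = &Lcm::trigger_deploy_failure;
        }

        // ...and invoke it later through the object pointer.
        Lcm * p = &lcm;
        (p->*action)(42);

        return 0;
    }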
@ -184,13 +184,13 @@ void VirtualMachineManager::_shutdown(unique_ptr<vm_msg_t> msg)

    if (msg->status() == "SUCCESS")
    {
-        lcm->trigger(LCMAction::SHUTDOWN_SUCCESS, id);
+        lcm->trigger_shutdown_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error shutting down VM");

-        lcm->trigger(LCMAction::SHUTDOWN_FAILURE, msg->oid());
+        lcm->trigger_shutdown_failure(msg->oid());
    }
}

@ -262,13 +262,13 @@ void VirtualMachineManager::_cancel(unique_ptr<vm_msg_t> msg)

    if (msg->status() == "SUCCESS")
    {
-        lcm->trigger(LCMAction::CANCEL_SUCCESS, id);
+        lcm->trigger_shutdown_success(id);
    }
    else
    {
        log_error(msg->oid(), msg->payload(), "Error canceling VM");

-        lcm->trigger(LCMAction::CANCEL_FAILURE, id);
+        lcm->trigger_shutdown_failure(id);
    }
}

@ -293,15 +293,13 @@ void VirtualMachineManager::_cleanup(unique_ptr<vm_msg_t> msg)
        vm->log("VMM", Log::INFO, "Host successfully cleaned.");

        vm->unlock();

-        lcm->trigger(LCMAction::CLEANUP_SUCCESS, id);
    }
    else
    {
        log_error(id, msg->payload(), "Error cleaning Host");

-        lcm->trigger(LCMAction::CLEANUP_FAILURE, id);
    }

+    lcm->trigger_cleanup_callback(id);
}

/* -------------------------------------------------------------------------- */
@ -327,13 +325,13 @@ void VirtualMachineManager::_save(unique_ptr<vm_msg_t> msg)

    if (msg->status() == "SUCCESS")
    {
-        lcm->trigger(LCMAction::SAVE_SUCCESS, id);
+        lcm->trigger_save_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error saving VM state");

-        lcm->trigger(LCMAction::SAVE_FAILURE, id);
+        lcm->trigger_save_failure(id);
    }
}

@ -353,13 +351,13 @@ void VirtualMachineManager::_restore(unique_ptr<vm_msg_t> msg)

    if (msg->status() == "SUCCESS")
    {
-        lcm->trigger(LCMAction::DEPLOY_SUCCESS, id);
+        lcm->trigger_deploy_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error restoring VM");

-        lcm->trigger(LCMAction::DEPLOY_FAILURE, id);
+        lcm->trigger_deploy_failure(id);
    }
}

@ -379,13 +377,13 @@ void VirtualMachineManager::_migrate(unique_ptr<vm_msg_t> msg)

    if (msg->status() == "SUCCESS")
    {
-        lcm->trigger(LCMAction::DEPLOY_SUCCESS, id);
+        lcm->trigger_deploy_success(id);
    }
    else
    {
        log_error(msg->oid(), msg->payload(), "Error live migrating VM");

-        lcm->trigger(LCMAction::DEPLOY_FAILURE, id);
+        lcm->trigger_deploy_failure(id);
    }
}

@ -411,13 +409,13 @@ void VirtualMachineManager::_attachdisk(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::ATTACH_SUCCESS, id);
+        lcm->trigger_attach_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error attaching new VM Disk");

-        lcm->trigger(LCMAction::ATTACH_FAILURE, id);
+        lcm->trigger_attach_failure(id);
    }
}

@ -443,13 +441,13 @@ void VirtualMachineManager::_detachdisk(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::DETACH_SUCCESS, id);
+        lcm->trigger_detach_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error detaching VM Disk");

-        lcm->trigger(LCMAction::DETACH_FAILURE, id);
+        lcm->trigger_detach_failure(id);
    }
}

@ -475,13 +473,13 @@ void VirtualMachineManager::_attachnic(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::ATTACH_NIC_SUCCESS, id);
+        lcm->trigger_attach_nic_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error attaching new VM NIC");

-        lcm->trigger(LCMAction::ATTACH_NIC_FAILURE, id);
+        lcm->trigger_attach_nic_failure(id);
    }
}

@ -507,13 +505,13 @@ void VirtualMachineManager::_detachnic(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::DETACH_NIC_SUCCESS, id);
+        lcm->trigger_detach_nic_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error detaching VM NIC");

-        lcm->trigger(LCMAction::DETACH_NIC_FAILURE, id);
+        lcm->trigger_detach_nic_failure(id);
    }
}

@ -548,13 +546,13 @@ void VirtualMachineManager::_snapshotcreate(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::SNAPSHOT_CREATE_SUCCESS, id);
+        lcm->trigger_snapshot_create_success(id);
    }
    else
    {
        log_error(msg->oid(), msg->payload(), "Error creating new VM Snapshot");

-        lcm->trigger(LCMAction::SNAPSHOT_CREATE_FAILURE, id);
+        lcm->trigger_snapshot_create_failure(id);
    }
}

@ -580,13 +578,13 @@ void VirtualMachineManager::_snapshotrevert(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::SNAPSHOT_REVERT_SUCCESS, id);
+        lcm->trigger_snapshot_revert_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error reverting VM Snapshot");

-        lcm->trigger(LCMAction::SNAPSHOT_REVERT_FAILURE, id);
+        lcm->trigger_snapshot_revert_failure(id);
    }
}

@ -612,13 +610,13 @@ void VirtualMachineManager::_snapshotdelete(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::SNAPSHOT_DELETE_SUCCESS, id);
+        lcm->trigger_snapshot_delete_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error deleting VM Snapshot");

-        lcm->trigger(LCMAction::SNAPSHOT_DELETE_FAILURE, id);
+        lcm->trigger_snapshot_delete_failure(id);
    }
}

@ -644,13 +642,13 @@ void VirtualMachineManager::_disksnapshotcreate(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::DISK_SNAPSHOT_SUCCESS, id);
+        lcm->trigger_disk_snapshot_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error creating new disk snapshot");

-        lcm->trigger(LCMAction::DISK_SNAPSHOT_FAILURE, id);
+        lcm->trigger_disk_snapshot_failure(id);
    }
}

@ -676,13 +674,13 @@ void VirtualMachineManager::_disksnapshotrevert(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::DISK_SNAPSHOT_SUCCESS, id);
+        lcm->trigger_disk_snapshot_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error reverting disk snapshot");

-        lcm->trigger(LCMAction::DISK_SNAPSHOT_FAILURE, id);
+        lcm->trigger_disk_snapshot_failure(id);
    }
}

@ -708,13 +706,13 @@ void VirtualMachineManager::_resizedisk(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::DISK_RESIZE_SUCCESS, id);
+        lcm->trigger_disk_resize_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error resizing disk");

-        lcm->trigger(LCMAction::DISK_RESIZE_FAILURE, id);
+        lcm->trigger_disk_resize_failure(id);
    }
}

@ -740,13 +738,13 @@ void VirtualMachineManager::_updateconf(unique_ptr<vm_msg_t> msg)

        vm->unlock();

-        lcm->trigger(LCMAction::UPDATE_CONF_SUCCESS, id);
+        lcm->trigger_update_conf_success(id);
    }
    else
    {
        log_error(id, msg->payload(), "Error updating conf for VM");

-        lcm->trigger(LCMAction::UPDATE_CONF_FAILURE, id);
+        lcm->trigger_update_conf_failure(id);
    }
}

@ -816,7 +814,7 @@ void VirtualMachineManager::_updatesg(unique_ptr<vm_msg_t> msg)
        vm->unlock();
    }

-    lcm->trigger(LCMAction::UPDATESG, sgid);
+    lcm->trigger_updatesg(sgid);
    return;
}

@ -30,7 +30,7 @@ int AddressRangeIPAM::from_vattr(VectorAttribute * attr, std::string& error_msg)

    IPAMRequest ir(attr);

-    ipamm->trigger(IPMAction::REGISTER_ADDRESS_RANGE, &ir);
+    ipamm->trigger_register_address_range(ir);

    ir.wait();

@ -69,7 +69,7 @@ int AddressRangeIPAM::allocate_addr(unsigned int index, unsigned int rsize,

    IPAMRequest ir(this, address_xml);

-    ipamm->trigger(IPMAction::ALLOCATE_ADDRESS, &ir);
+    ipamm->trigger_allocate_address(ir);

    ir.wait();

@ -103,7 +103,7 @@ int AddressRangeIPAM::get_addr(unsigned int& index, unsigned int rsize,

    IPAMRequest ir(this, address_xml);

-    ipamm->trigger(IPMAction::GET_ADDRESS, &ir);
+    ipamm->trigger_get_address(ir);

    ir.wait();

@ -160,7 +160,7 @@ int AddressRangeIPAM::free_addr(unsigned int index, std::string& error_msg)

    IPAMRequest ir(this, address_xml);

-    ipamm->trigger(IPMAction::FREE_ADDRESS, &ir);
+    ipamm->trigger_free_address(ir);

    ir.wait();

@ -218,7 +218,7 @@ int AddressRangePool::rm_ar(unsigned int ar_id, bool force, string& error_msg)

    IPAMRequest ir(ar_ptr);

-    ipamm->trigger(IPMAction::UNREGISTER_ADDRESS_RANGE, &ir);
+    ipamm->trigger_unregister_address_range(ir);

    ir.wait();

@ -256,7 +256,7 @@ int AddressRangePool::rm_ars(string& error_msg)

    IPAMRequest ir(it->second->attr);

-    ipamm->trigger(IPMAction::UNREGISTER_ADDRESS_RANGE, &ir);
+    ipamm->trigger_unregister_address_range(ir);

    ir.wait();

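The IPAM call sites keep their synchronous shape: build an IPAMRequest, trigger it, then wait() until the driver answers. A rough sketch of that wait/notify mechanism is shown below; the class and member names are assumptions for illustration, not the actual SyncRequest/IPAMRequest implementation.

    // Sketch of a synchronous request object with wait()/notify().
    #include <condition_variable>
    #include <mutex>

    class SyncRequestSketch
    {
    public:
        // Called by the thread that triggered the request; blocks until the
        // driver thread reports a result.
        void wait()
        {
            std::unique_lock<std::mutex> lock(mtx);
            cv.wait(lock, [this]{ return done; });
        }

        // Called by the driver thread when the operation finishes.
        void notify(bool ok)
        {
            {
                std::lock_guard<std::mutex> lock(mtx);
                result = ok;
                done   = true;
            }
            cv.notify_one();
        }

        bool result = false;            // outcome examined after wait() returns

    private:
        std::mutex              mtx;
        std::condition_variable cv;
        bool                    done = false;
    };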