tevent: Flow: add tevent_thread_call_depth_set_callback()
Note the tevent-0.14.1.sigs changes will be reverted in the 'tevent 0.15.0' commit.

Signed-off-by: Pavel Filipenský <pfilipensky@samba.org>
Reviewed-by: Stefan Metzmacher <metze@samba.org>
parent 0c4d6e630f
commit d7b29125c0
lib/tevent/ABI/tevent-0.14.1.sigs

@@ -26,6 +26,7 @@ _tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, const char *)
 _tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn, const char *)
 _tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn, const char *)
 _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+_tevent_thread_call_depth_reset_from_req: void (struct tevent_req *, const char *)
 _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *)
 tevent_abort: void (struct tevent_context *, const char *)
 tevent_backend_list: const char **(TALLOC_CTX *)
@@ -139,6 +140,7 @@ tevent_signal_support: bool (struct tevent_context *)
 tevent_thread_call_depth_activate: void (size_t *)
 tevent_thread_call_depth_deactivate: void (void)
 tevent_thread_call_depth_reset_from_req: void (struct tevent_req *)
+tevent_thread_call_depth_set_callback: void (tevent_call_depth_callback_t, void *)
 tevent_thread_call_depth_start: void (struct tevent_req *)
 tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *)
 tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *)
lib/tevent/tevent.c

@@ -824,7 +824,10 @@ int _tevent_loop_once(struct tevent_context *ev, const char *location)
 	tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
 
 	/* New event (and request) will always start with call depth 0. */
-	tevent_thread_call_depth_set(0);
+	tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_RESET,
+					NULL,
+					0,
+					__func__);
 
 	if (ev->nesting.level > 0) {
 		if (ev->nesting.hook_fn) {
lib/tevent/tevent.h

@@ -2066,6 +2066,10 @@ pid_t tevent_cached_getpid(void);
  *
  * Part 1: activation/deactivation
  *
+ * void tevent_thread_call_depth_set_callback(f, private_data)
+ * Register a callback that can track 'call depth' and 'request flow'
+ *
+ * NULL as a function callback means deactivation.
  *
  * Part 2: Mark the request (and its subrequests) to be tracked
  *
  * tevent_thread_call_depth_start(struct tevent_req *req)
@@ -2092,6 +2096,40 @@ pid_t tevent_cached_getpid(void);
  * @{
  */
 
+enum tevent_thread_call_depth_cmd {
+	TEVENT_CALL_FLOW_REQ_RESET,
+	TEVENT_CALL_FLOW_REQ_CREATE,
+	TEVENT_CALL_FLOW_REQ_CANCEL,
+	TEVENT_CALL_FLOW_REQ_CLEANUP,
+	TEVENT_CALL_FLOW_REQ_NOTIFY_CB,
+	TEVENT_CALL_FLOW_REQ_QUEUE_ENTER,
+	TEVENT_CALL_FLOW_REQ_QUEUE_TRIGGER,
+	TEVENT_CALL_FLOW_REQ_QUEUE_LEAVE,
+};
+
+typedef void (*tevent_call_depth_callback_t)(
+	void *private_data,
+	enum tevent_thread_call_depth_cmd cmd,
+	struct tevent_req *req,
+	size_t depth,
+	const char *fname);
+
+struct tevent_thread_call_depth_state {
+	tevent_call_depth_callback_t cb;
+	void *cb_private;
+};
+
+extern __thread struct tevent_thread_call_depth_state
+	tevent_thread_call_depth_state_g;
+
+/**
+ * Register callback function for request/subrequest call depth / flow tracking.
+ *
+ * @param[in] f External call depth and flow handling function
+ */
+void tevent_thread_call_depth_set_callback(tevent_call_depth_callback_t f,
+					   void *private_data);
+
 #ifdef TEVENT_DEPRECATED
 
 void tevent_thread_call_depth_activate(size_t *ptr) _DEPRECATED_;
@@ -2101,13 +2139,19 @@ void tevent_thread_call_depth_start(struct tevent_req *req) _DEPRECATED_;
 #endif
 
 /**
- * Set the external variable to the call depth of the request req.
+ * Reset the external call depth to the call depth of the request.
  *
- * @param[in] req Request from which the call depth is assigned to ext.
- * variable.
+ * @param[in] req Request from which the call depth is reset.
  */
 void tevent_thread_call_depth_reset_from_req(struct tevent_req *req);
 
+void _tevent_thread_call_depth_reset_from_req(struct tevent_req *req,
+					      const char *fname);
+
+#define tevent_thread_call_depth_reset_from_req(req) \
+	_tevent_thread_call_depth_reset_from_req(req, __func__)
+
 /* @} */
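For orientation, a consumer of the new interface might hook it up as below. This is a minimal sketch, not part of the commit: flow_cb, flow_tracing_start and flow_tracing_stop are hypothetical names, and the output format is purely illustrative.

#include <stdio.h>
#include <tevent.h>

/* Hypothetical tracker: print one line per flow event, indented by depth. */
static void flow_cb(void *private_data,
		    enum tevent_thread_call_depth_cmd cmd,
		    struct tevent_req *req,
		    size_t depth,
		    const char *fname)
{
	(void)private_data;
	(void)req;
	fprintf(stderr, "%*scmd=%d depth=%zu %s\n",
		(int)(depth * 2), "", (int)cmd, depth,
		fname != NULL ? fname : "<unknown>");
}

static void flow_tracing_start(void)
{
	tevent_thread_call_depth_set_callback(flow_cb, NULL);
}

static void flow_tracing_stop(void)
{
	/* NULL deactivates; tevent delivers one final
	 * TEVENT_CALL_FLOW_REQ_RESET with depth 0 first. */
	tevent_thread_call_depth_set_callback(NULL, NULL);
}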
lib/tevent/tevent_debug.c

@@ -27,6 +27,8 @@
 #include "tevent.h"
 #include "tevent_internal.h"
 
+#undef tevent_thread_call_depth_reset_from_req
+
 /********************************************************************
  * Debug wrapper functions, modeled (with lot's of code copied as is)
  * after the ev debug wrapper functions
@@ -294,7 +296,8 @@ void tevent_trace_queue_callback(struct tevent_context *ev,
 	}
 }
 
-static __thread size_t *tevent_thread_call_depth_ptr = NULL;
+_PRIVATE_ __thread
+struct tevent_thread_call_depth_state tevent_thread_call_depth_state_g;
 
 void tevent_thread_call_depth_activate(size_t *ptr)
 {
@@ -310,14 +313,37 @@ void tevent_thread_call_depth_start(struct tevent_req *req)
 
 void tevent_thread_call_depth_reset_from_req(struct tevent_req *req)
 {
-	if (tevent_thread_call_depth_ptr != NULL) {
-		*tevent_thread_call_depth_ptr = req->internal.call_depth;
-	}
+	_tevent_thread_call_depth_reset_from_req(req, NULL);
+}
+
+void _tevent_thread_call_depth_reset_from_req(struct tevent_req *req,
+					      const char *fname)
+{
+	if (tevent_thread_call_depth_state_g.cb != NULL) {
+		tevent_thread_call_depth_state_g.cb(
+			tevent_thread_call_depth_state_g.cb_private,
+			TEVENT_CALL_FLOW_REQ_RESET,
+			req,
+			req->internal.call_depth,
+			fname);
+	}
 }
 
-_PRIVATE_ void tevent_thread_call_depth_set(size_t depth)
+void tevent_thread_call_depth_set_callback(tevent_call_depth_callback_t f,
+					   void *private_data)
 {
-	if (tevent_thread_call_depth_ptr != NULL) {
-		*tevent_thread_call_depth_ptr = depth;
-	}
+	/* In case of deactivation, make sure that call depth is set to 0 */
+	if (tevent_thread_call_depth_state_g.cb != NULL) {
+		tevent_thread_call_depth_state_g.cb(
+			tevent_thread_call_depth_state_g.cb_private,
+			TEVENT_CALL_FLOW_REQ_RESET,
+			NULL,
+			0,
+			"tevent_thread_call_depth_set_callback");
+	}
+	tevent_thread_call_depth_state_g = (struct tevent_thread_call_depth_state)
+	{
+		.cb = f,
+		.cb_private = private_data,
+	};
 }
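Note the deactivation path: before the state is cleared, one last TEVENT_CALL_FLOW_REQ_RESET with depth 0 is delivered to the old callback, so an external tracker is not left holding a stale call depth.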
lib/tevent/tevent_internal.h

@@ -507,7 +507,21 @@ void tevent_epoll_set_panic_fallback(struct tevent_context *ev,
 							    bool replay));
 #endif
 
-void tevent_thread_call_depth_set(size_t depth);
+static inline void tevent_thread_call_depth_notify(
+	enum tevent_thread_call_depth_cmd cmd,
+	struct tevent_req *req,
+	size_t depth,
+	const char *fname)
+{
+	if (tevent_thread_call_depth_state_g.cb != NULL) {
+		tevent_thread_call_depth_state_g.cb(
+			tevent_thread_call_depth_state_g.cb_private,
+			cmd,
+			req,
+			depth,
+			fname);
+	}
+}
 
 void tevent_trace_point_callback(struct tevent_context *ev,
				  enum tevent_trace_point);
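Because tevent_thread_call_depth_notify() is a static inline in the internal header, the call sites added below cost only a thread-local NULL check while no callback is registered.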
lib/tevent/tevent_queue.c

@@ -70,6 +70,10 @@ static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
 	}
 
 	tevent_trace_queue_callback(q->list->ev, e, TEVENT_EVENT_TRACE_DETACH);
+	tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_QUEUE_LEAVE,
+					q->list->req,
+					q->list->req->internal.call_depth,
+					e->trigger_name);
 	DLIST_REMOVE(q->list, e);
 	q->length--;
 
@@ -155,7 +159,10 @@ static void tevent_queue_immediate_trigger(struct tevent_context *ev,
 	tevent_trace_queue_callback(ev, q->list,
				    TEVENT_EVENT_TRACE_BEFORE_HANDLER);
 	/* Set the call depth of the request coming from the queue. */
-	tevent_thread_call_depth_set(q->list->req->internal.call_depth);
+	tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_QUEUE_TRIGGER,
+					q->list->req,
+					q->list->req->internal.call_depth,
+					q->list->trigger_name);
 	q->list->triggered = true;
 	q->list->trigger(q->list->req, q->list->private_data);
 }
@@ -218,6 +225,10 @@ static struct tevent_queue_entry *tevent_queue_add_internal(
 	queue->length++;
 	talloc_set_destructor(e, tevent_queue_entry_destructor);
 	tevent_trace_queue_callback(ev, e, TEVENT_EVENT_TRACE_ATTACH);
+	tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_QUEUE_ENTER,
+					req,
+					req->internal.call_depth,
+					e->trigger_name);
 
 	if (!queue->running) {
 		return e;
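Together these three call sites let a registered callback follow a request through a queue: TEVENT_CALL_FLOW_REQ_QUEUE_ENTER when the entry is added, TEVENT_CALL_FLOW_REQ_QUEUE_TRIGGER when its trigger fires, and TEVENT_CALL_FLOW_REQ_QUEUE_LEAVE when the entry is destroyed.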
lib/tevent/tevent_req.c

@@ -141,10 +141,13 @@ struct tevent_req *__tevent_req_create(TALLOC_CTX *mem_ctx,
 	*ppdata = data;
 
 	/* Initially, talloc_zero_size() sets internal.call_depth to 0 */
-	if (parent != NULL && parent->internal.call_depth > 0) {
+	if (parent != NULL) {
 		req->internal.call_depth = parent->internal.call_depth + 1;
-		tevent_thread_call_depth_set(req->internal.call_depth);
 	}
+	tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CREATE,
+					req,
+					req->internal.call_depth,
+					func);
 
 	return req;
 }
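To make the depth rule concrete, here is a sketch of the values for a hypothetical two-level request chain (the _send function names are illustrative, not from this commit):

/*
 * req = module_send(mem_ctx, ev, ...);
 *     req->internal.call_depth == 0      (created without a parent)
 * subreq = helper_send(state, ev, ...);
 *     subreq->internal.call_depth == 1   (parent depth + 1)
 *
 * TEVENT_CALL_FLOW_REQ_CREATE fires for both with the depth just
 * assigned; when subreq completes, _tevent_req_notify_callback()
 * reports TEVENT_CALL_FLOW_REQ_NOTIFY_CB with depth 0, i.e. back at
 * the parent's level (see the next hunk).
 */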
@@ -165,18 +168,18 @@ void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
 	}
 	if (req->async.fn != NULL) {
 		/* Calling back the parent code, decrement the call depth. */
-		tevent_thread_call_depth_set(req->internal.call_depth > 0 ?
-					     req->internal.call_depth - 1 : 0);
+		size_t new_depth = req->internal.call_depth > 0 ?
+				   req->internal.call_depth - 1 : 0;
+		tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_NOTIFY_CB,
+						req,
+						new_depth,
+						req->async.fn_name);
 		req->async.fn(req);
 	}
 }
 
 static void tevent_req_cleanup(struct tevent_req *req)
 {
 	if (req->private_cleanup.fn == NULL) {
 		return;
 	}
 
 	if (req->private_cleanup.state >= req->internal.state) {
 		/*
 		 * Don't call the cleanup_function multiple times for the same
@@ -185,6 +188,15 @@ static void tevent_req_cleanup(struct tevent_req *req)
 		return;
 	}
 
+	tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CLEANUP,
+					req,
+					req->internal.call_depth,
+					req->private_cleanup.fn_name);
+
+	if (req->private_cleanup.fn == NULL) {
+		return;
+	}
+
 	req->private_cleanup.state = req->internal.state;
 	req->private_cleanup.fn(req, req->internal.state);
 }
||||
@ -429,6 +441,11 @@ void _tevent_req_set_cancel_fn(struct tevent_req *req,
|
||||
|
||||
bool _tevent_req_cancel(struct tevent_req *req, const char *location)
|
||||
{
|
||||
tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CANCEL,
|
||||
req,
|
||||
req->internal.call_depth,
|
||||
req->private_cancel.fn_name);
|
||||
|
||||
if (req->private_cancel.fn == NULL) {
|
||||
return false;
|
||||
}
|
||||
|