Mirror of https://github.com/samba-team/samba.git
- fixed a problem with packets to ourselves. The packets were being
  processed immediately, but the input routines indirectly assumed they were
  being called as a new event (for example, a calling routine might queue the
  packet, then afterwards modify the ltdb record). The solution was to make
  self packets queue via a zero timeout.
- fixed unlinking of the socket on exit in the lockwait code. Needed an
  _exit() instead of exit() so atexit() doesn't trigger.
- print latency of lockwait delays

(This used to be ctdb commit 1b0684b4f6a976f4c5fe54394ac54d121810b298)
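The second fix relies on the difference between exit() and _exit() in a forked child: exit() runs any atexit() handlers the child inherited from its parent, while _exit() terminates immediately without running them. The short, self-contained C sketch below (not taken from the ctdb sources; the cleanup handler is purely illustrative) demonstrates why the lockwait child must use _exit().

/*
 * Standalone sketch (not part of the ctdb tree) of the exit() vs _exit()
 * behaviour behind the lockwait fix. A forked child inherits the parent's
 * atexit() handlers; calling exit() runs them, _exit() does not. In ctdb
 * the inherited handler was unlinking the daemon's socket.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void cleanup(void)
{
	/* Stand-in for a handler that would unlink a daemon socket. */
	printf("atexit handler ran in pid %d\n", (int)getpid());
}

int main(void)
{
	atexit(cleanup);

	pid_t pid = fork();
	if (pid == -1) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		/* Child: _exit() skips the inherited atexit() handler;
		   calling exit(0) here would run it a second time. */
		_exit(0);
	}
	waitpid(pid, NULL, 0);

	/* Parent: returning from main runs cleanup() exactly once. */
	return 0;
}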
parent bba02ce182
commit 00c706c2b8
@@ -331,6 +331,44 @@ void ctdb_daemon_connect_wait(struct ctdb_context *ctdb)
 	DEBUG(3,("ctdb_connect_wait: got all %d nodes\n", expected));
 }
 
+struct queue_next {
+	struct ctdb_context *ctdb;
+	struct ctdb_req_header *hdr;
+};
+
+
+/*
+  trigered when a deferred packet is due
+ */
+static void queue_next_trigger(struct event_context *ev, struct timed_event *te,
+				struct timeval t, void *private_data)
+{
+	struct queue_next *q = talloc_get_type(private_data, struct queue_next);
+	ctdb_recv_pkt(q->ctdb, (uint8_t *)q->hdr, q->hdr->length);
+	talloc_free(q);
+}
+
+/*
+  defer a packet, so it is processed on the next event loop
+  this is used for sending packets to ourselves
+ */
+static void ctdb_defer_packet(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
+{
+	struct queue_next *q;
+	q = talloc(ctdb, struct queue_next);
+	if (q == NULL) {
+		DEBUG(0,(__location__ " Failed to allocate deferred packet\n"));
+		return;
+	}
+	q->ctdb = ctdb;
+	q->hdr = talloc_memdup(ctdb, hdr, hdr->length);
+	if (q->hdr == NULL) {
+		DEBUG(0,("Error copying deferred packet to self\n"));
+		return;
+	}
+	event_add_timed(ctdb->ev, q, timeval_zero(), queue_next_trigger, q);
+}
+
 /*
   queue a packet or die
  */
@@ -338,7 +376,9 @@ void ctdb_queue_packet(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
 {
 	struct ctdb_node *node;
 	node = ctdb->nodes[hdr->destnode];
-	if (ctdb->methods->queue_pkt(node, (uint8_t *)hdr, hdr->length) != 0) {
+	if (hdr->destnode == ctdb->vnn && !(ctdb->flags & CTDB_FLAG_SELF_CONNECT)) {
+		ctdb_defer_packet(ctdb, hdr);
+	} else if (ctdb->methods->queue_pkt(node, (uint8_t *)hdr, hdr->length) != 0) {
 		ctdb_fatal(ctdb, "Unable to queue packet\n");
 	}
 }
@@ -222,17 +222,12 @@ static void ctdb_call_send_dmaster(struct ctdb_db_context *ctdb_db,
 	memcpy(&r->data[0], key->dptr, key->dsize);
 	memcpy(&r->data[key->dsize], data->dptr, data->dsize);
 
-	if (r->hdr.destnode == ctdb->vnn) {
-		/* we are the lmaster - don't send to ourselves */
-		ctdb_recv_pkt(ctdb, (uint8_t *)&r->hdr, r->hdr.length);
-		return;
-	} else {
-		ctdb_queue_packet(ctdb, &r->hdr);
-
-		/* update the ltdb to record the new dmaster */
-		header->dmaster = r->hdr.destnode;
-		ctdb_ltdb_store(ctdb_db, *key, header, *data);
-	}
+	/* XXX - probably not necessary when lmaster==dmaster
+	   update the ltdb to record the new dmaster */
+	header->dmaster = r->hdr.destnode;
+	ctdb_ltdb_store(ctdb_db, *key, header, *data);
+
+	ctdb_queue_packet(ctdb, &r->hdr);
 
 	talloc_free(r);
 }
@@ -280,7 +275,8 @@ void ctdb_request_dmaster(struct ctdb_context *ctdb, struct ctdb_req_header *hdr
 	}
 
 	/* its a protocol error if the sending node is not the current dmaster */
-	if (header.dmaster != hdr->srcnode) {
+	if (header.dmaster != hdr->srcnode &&
+	    hdr->srcnode != ctdb_lmaster(ctdb_db->ctdb, &key)) {
 		ctdb_fatal(ctdb, "dmaster request from non-master");
 		return;
 	}
@@ -313,13 +309,7 @@ void ctdb_request_dmaster(struct ctdb_context *ctdb, struct ctdb_req_header *hdr
 	r->datalen = data.dsize;
 	memcpy(&r->data[0], data.dptr, data.dsize);
 
-	if (r->hdr.destnode == r->hdr.srcnode) {
-		/* inject the packet back into the input queue */
-		talloc_steal(ctdb, r);
-		ctdb_recv_pkt(ctdb, (uint8_t *)&r->hdr, r->hdr.length);
-	} else {
-		ctdb_queue_packet(ctdb, &r->hdr);
-	}
+	ctdb_queue_packet(ctdb, &r->hdr);
 
 	talloc_free(tmp_ctx);
 }
@@ -34,6 +34,7 @@ struct lockwait_handle {
 	pid_t child;
 	void *private_data;
 	void (*callback)(void *);
+	struct timeval t;
 };
 
 static void lockwait_handler(struct event_context *ev, struct fd_event *fde,
@@ -46,6 +47,7 @@ static void lockwait_handler(struct event_context *ev, struct fd_event *fde,
 	pid_t child = h->child;
 	talloc_set_destructor(h, NULL);
 	close(h->fd[0]);
+	DEBUG(3,(__location__ " lockwait took %.6f seconds\n", timeval_elapsed(&h->t)));
 	talloc_free(h);
 	callback(p);
 	waitpid(child, NULL, 0);
@@ -106,7 +108,7 @@ struct lockwait_handle *ctdb_lockwait(struct ctdb_db_context *ctdb_db,
 		 * Do we need a tdb_reopen here?
 		 */
 		tdb_chainlock(ctdb_db->ltdb->tdb, key);
-		exit(0);
+		_exit(0);
 	}
 
 	close(result->fd[1]);
@@ -120,5 +122,7 @@ struct lockwait_handle *ctdb_lockwait(struct ctdb_db_context *ctdb_db,
 		return NULL;
 	}
 
+	result->t = timeval_current();
+
 	return result;
 }