merge from tridge
(This used to be ctdb commit 08173e3ab77178b9841db0081a51b93291d9e8dc)
@@ -23,9 +23,6 @@ CFLAGS=-g -I$(srcdir)/include -Iinclude -Ilib -Ilib/util -I$(srcdir) \

LIB_FLAGS=@LDFLAGS@ -Llib @LIBS@ $(POPT_LIBS) @INFINIBAND_LIBS@

EVENTS_OBJ = lib/events/events.o lib/events/events_standard.o \
	lib/events/events_signal.o lib/events/events_timed.o

UTIL_OBJ = lib/util/idtree.o lib/util/db_wrap.o lib/util/strlist.o lib/util/util.o

CTDB_COMMON_OBJ = common/ctdb.o common/ctdb_daemon.o common/ctdb_client.o \

@@ -38,7 +35,7 @@ CTDB_TCP_OBJ = tcp/tcp_connect.o tcp/tcp_io.o tcp/tcp_init.o

CTDB_OBJ = $(CTDB_COMMON_OBJ) $(CTDB_TCP_OBJ) $(POPT_OBJ)

OBJS = @TDB_OBJ@ @TALLOC_OBJ@ @LIBREPLACEOBJ@ @INFINIBAND_WRAPPER_OBJ@ $(EXTRA_OBJ) $(EVENTS_OBJ) $(CTDB_OBJ) $(UTIL_OBJ)
OBJS = @TDB_OBJ@ @TALLOC_OBJ@ @LIBREPLACEOBJ@ @INFINIBAND_WRAPPER_OBJ@ $(EXTRA_OBJ) @EVENTS_OBJ@ $(CTDB_OBJ) $(UTIL_OBJ)

BINS = bin/ctdbd bin/ctdbd_test bin/ctdb_test bin/ctdb_bench bin/ctdb_messaging bin/ctdb_fetch bin/ctdb_fetch1 bin/lockwait bin/ctdb_control bin/ctdb_dump bin/recoverd @INFINIBAND_BINS@

@@ -7,6 +7,8 @@ IPATHS="-I libreplace -I lib/replace -I ../libreplace -I ../replace"
IPATHS="$IPATHS -I lib/talloc -I talloc -I ../talloc"
IPATHS="$IPATHS -I lib/tdb -I tdb -I ../tdb"
IPATHS="$IPATHS -I lib/popt -I popt -I ../popt"
IPATHS="$IPATHS -I lib/events"

autoheader $IPATHS || exit 1
autoconf $IPATHS || exit 1
@@ -37,6 +37,7 @@ static struct {
	const char *db_dir;
	int torture;
	const char *logfile;
	const char *events;
} ctdb_cmdline = {
	.nlist = NULL,
	.transport = "tcp",

@@ -45,11 +46,26 @@ static struct {
	.self_connect = 0,
	.db_dir = NULL,
	.torture = 0,
	.logfile = NULL
	.logfile = NULL,
};

enum {OPT_EVENTSYSTEM=1};

static void ctdb_cmdline_callback(poptContext con,
				  enum poptCallbackReason reason,
				  const struct poptOption *opt,
				  const char *arg, const void *data)
{
	switch (opt->val) {
	case OPT_EVENTSYSTEM:
		event_set_default_backend(arg);
		break;
	}
}

struct poptOption popt_ctdb_cmdline[] = {
	{ NULL, 0, POPT_ARG_CALLBACK, (void *)ctdb_cmdline_callback },
	{ "nlist", 0, POPT_ARG_STRING, &ctdb_cmdline.nlist, 0, "node list file", "filename" },
	{ "listen", 0, POPT_ARG_STRING, &ctdb_cmdline.myaddress, 0, "address to listen on", "address" },
	{ "socket", 0, POPT_ARG_STRING, &ctdb_cmdline.socketname, 0, "local socket name", "filename" },

@@ -59,6 +75,7 @@ struct poptOption popt_ctdb_cmdline[] = {
	{ "dbdir", 0, POPT_ARG_STRING, &ctdb_cmdline.db_dir, 0, "directory for the tdb files", NULL },
	{ "torture", 0, POPT_ARG_NONE, &ctdb_cmdline.torture, 0, "enable nastiness in library", NULL },
	{ "logfile", 0, POPT_ARG_STRING, &ctdb_cmdline.logfile, 0, "log file location", "filename" },
	{ "events", 0, POPT_ARG_STRING, NULL, OPT_EVENTSYSTEM, "event system", NULL },
	{ NULL }
};
@@ -249,8 +249,6 @@ static int ctdb_client_destructor(struct ctdb_client *client)
{
	ctdb_reqid_remove(client->ctdb, client->client_id);
	client->ctdb->status.num_clients--;
	close(client->fd);
	client->fd = -1;
	return 0;
}

@@ -678,8 +676,8 @@ int ctdb_start(struct ctdb_context *ctdb)

	ctdb->ev = event_context_init(NULL);
	fde = event_add_fd(ctdb->ev, ctdb, fd[0], EVENT_FD_READ, ctdb_read_from_parent, &fd[0]);
	fde = event_add_fd(ctdb->ev, ctdb, ctdb->daemon.sd, EVENT_FD_READ, ctdb_accept_client, ctdb);
	fde = event_add_fd(ctdb->ev, ctdb, fd[0], EVENT_FD_READ|EVENT_FD_AUTOCLOSE, ctdb_read_from_parent, &fd[0]);
	fde = event_add_fd(ctdb->ev, ctdb, ctdb->daemon.sd, EVENT_FD_READ|EVENT_FD_AUTOCLOSE, ctdb_accept_client, ctdb);
	ctdb_main_loop(ctdb);

	return 0;

@@ -720,7 +718,7 @@ int ctdb_start_daemon(struct ctdb_context *ctdb)
	talloc_set_destructor(domain_socket_name, unlink_destructor);

	ctdb->ev = event_context_init(NULL);
	fde = event_add_fd(ctdb->ev, ctdb, ctdb->daemon.sd, EVENT_FD_READ,
	fde = event_add_fd(ctdb->ev, ctdb, ctdb->daemon.sd, EVENT_FD_READ|EVENT_FD_AUTOCLOSE,
			   ctdb_accept_client, ctdb);
	ctdb_main_loop(ctdb);
@@ -273,7 +273,7 @@ int ctdb_queue_set_fd(struct ctdb_queue *queue, int fd)
	queue->fde = NULL;

	if (fd != -1) {
		queue->fde = event_add_fd(queue->ctdb->ev, queue, fd, EVENT_FD_READ,
		queue->fde = event_add_fd(queue->ctdb->ev, queue, fd, EVENT_FD_READ|EVENT_FD_AUTOCLOSE,
					  queue_io_handler, queue);
		if (queue->fde == NULL) {
			return -1;
@@ -29,11 +29,13 @@

struct lockwait_handle {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct fd_event *fde;
	int fd[2];
	pid_t child;
	void *private_data;
	void (*callback)(void *);
	TDB_DATA key;
	struct timeval start_time;
};

@@ -45,19 +47,30 @@ static void lockwait_handler(struct event_context *ev, struct fd_event *fde,
	void (*callback)(void *) = h->callback;
	void *p = h->private_data;
	pid_t child = h->child;
	TDB_DATA key = h->key;
	struct tdb_context *tdb = h->ctdb_db->ltdb->tdb;
	TALLOC_CTX *tmp_ctx = talloc_new(ev);

	talloc_free(fde);

	key.dptr = talloc_memdup(tmp_ctx, key.dptr, key.dsize);

	talloc_set_destructor(h, NULL);
	close(h->fd[0]);
	ctdb_latency(&h->ctdb->status.max_lockwait_latency, h->start_time);
	h->ctdb->status.pending_lockwait_calls--;
	talloc_free(h);

	tdb_chainlock_mark(tdb, key);
	callback(p);
	tdb_chainlock_unmark(tdb, key);

	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	talloc_free(tmp_ctx);
}

static int lockwait_destructor(struct lockwait_handle *h)
{
	h->ctdb->status.pending_lockwait_calls--;
	close(h->fd[0]);
	kill(h->child, SIGKILL);
	waitpid(h->child, NULL, 0);
	return 0;

@@ -84,7 +97,7 @@ struct lockwait_handle *ctdb_lockwait(struct ctdb_db_context *ctdb_db,
	ctdb_db->ctdb->status.lockwait_calls++;
	ctdb_db->ctdb->status.pending_lockwait_calls++;

	if (!(result = talloc_zero(ctdb_db, struct lockwait_handle))) {
	if (!(result = talloc_zero(private_data, struct lockwait_handle))) {
		ctdb_db->ctdb->status.pending_lockwait_calls--;
		return NULL;
	}

@@ -110,13 +123,15 @@ struct lockwait_handle *ctdb_lockwait(struct ctdb_db_context *ctdb_db,
	result->callback = callback;
	result->private_data = private_data;
	result->ctdb = ctdb_db->ctdb;
	result->ctdb_db = ctdb_db;
	result->key = key;

	if (result->child == 0) {
		char c = 0;
		close(result->fd[0]);
		/*
		 * Do we need a tdb_reopen here?
		 */
		tdb_chainlock(ctdb_db->ltdb->tdb, key);
		write(result->fd[1], &c, 1);
		pause();
		_exit(0);
	}

@@ -124,7 +139,7 @@ struct lockwait_handle *ctdb_lockwait(struct ctdb_db_context *ctdb_db,
	talloc_set_destructor(result, lockwait_destructor);

	result->fde = event_add_fd(ctdb_db->ctdb->ev, result, result->fd[0],
				   EVENT_FD_READ, lockwait_handler,
				   EVENT_FD_READ|EVENT_FD_AUTOCLOSE, lockwait_handler,
				   (void *)result);
	if (result->fde == NULL) {
		talloc_free(result);
@@ -193,7 +193,6 @@ static void lock_fetch_callback(void *p)
{
	struct lock_fetch_state *state = talloc_get_type(p, struct lock_fetch_state);
	state->recv_pkt(state->recv_context, (uint8_t *)state->hdr, state->hdr->length);
	talloc_free(state);
	DEBUG(2,(__location__ " PACKET REQUEUED\n"));
}

@@ -252,7 +251,7 @@ int ctdb_ltdb_lock_requeue(struct ctdb_db_context *ctdb_db,
		return 0;
	}

	state = talloc(ctdb_db, struct lock_fetch_state);
	state = talloc(hdr, struct lock_fetch_state);
	state->ctdb = ctdb_db->ctdb;
	state->hdr = hdr;
	state->recv_pkt = recv_pkt;
@@ -74,7 +74,6 @@ static void ctdb_traverse_local_handler(uint8_t *rawdata, size_t length, void *p
 */
static int traverse_local_destructor(struct ctdb_traverse_local_handle *h)
{
	close(h->fd[0]);
	kill(h->child, SIGKILL);
	waitpid(h->child, NULL, 0);
	return 0;
@@ -27,6 +27,7 @@ EXTRA_OBJ=""
m4_include(libpopt.m4)
m4_include(libtalloc.m4)
m4_include(libtdb.m4)
m4_include(libevents.m4)
m4_include(ib/config.m4)

AC_SUBST(EXTRA_OBJ)

@@ -5,6 +5,7 @@ AC_CHECK_HEADERS(sys/epoll.h)
AC_CHECK_FUNCS(epoll_create)
if test x"$ac_cv_header_sys_epoll_h" = x"yes" -a x"$ac_cv_func_epoll_create" = x"yes";then
	SMB_ENABLE(EVENTS_EPOLL,YES)
	AC_DEFINE(HAVE_EVENTS_EPOLL, 1, [Whether epoll is available])

# check for native Linux AIO interface
AC_CHECK_HEADERS(libaio.h)
@@ -71,6 +71,8 @@ struct event_ops_list {
/* list of registered event backends */
static struct event_ops_list *event_backends;

static char *event_default_backend = NULL;

/*
  register an events backend
*/

@@ -85,6 +87,15 @@ bool event_register_backend(const char *name, const struct event_ops *ops)
	return True;
}

/*
  set the default event backend
*/
void event_set_default_backend(const char *backend)
{
	if (event_default_backend) free(event_default_backend);
	event_default_backend = strdup(backend);
}

/*
  initialise backends if not already done
*/
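
For orientation, a minimal sketch (not part of the commit; init_with_backend is an illustrative helper name) of how the new default-backend machinery is meant to be driven, for example by the --events option added to the ctdb command line above:

#include "lib/events/events.h"

/* Pick an event backend by name before creating a context.
 * event_context_init() falls back to the default set here,
 * and to "standard" if no default was ever set. */
static struct event_context *init_with_backend(TALLOC_CTX *mem_ctx,
					       const char *backend)
{
	if (backend != NULL) {
		event_set_default_backend(backend);	/* e.g. "select" */
	}
	return event_context_init(mem_ctx);
}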
@@ -99,7 +110,15 @@ static void event_backend_init(void)
	run_init_functions(shared_init);
#else
	bool events_standard_init(void);
	bool events_select_init(void);
	events_select_init();
	events_standard_init();
#if HAVE_EVENTS_EPOLL
	{
		bool events_epoll_init(void);
		events_epoll_init();
	}
#endif
#endif
}

@@ -169,6 +188,9 @@ struct event_context *event_context_init_byname(TALLOC_CTX *mem_ctx, const char
		name = lp_parm_string(-1, "event", "backend");
	}
#endif
	if (name == NULL) {
		name = event_default_backend;
	}
	if (name == NULL) {
		name = "standard";
	}

@@ -195,6 +217,9 @@ struct event_context *event_context_init(TALLOC_CTX *mem_ctx)

/*
  add a fd based event
  return NULL on failure (memory allocation error)

  if flags contains EVENT_FD_AUTOCLOSE then the fd will be closed when
  the returned fd_event context is freed
*/
struct fd_event *event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
			      int fd, uint16_t flags, event_fd_handler_t handler,
@@ -23,7 +23,7 @@
#ifndef __EVENTS_H__
#define __EVENTS_H__

#include "lib/talloc/talloc.h"
#include "talloc/talloc.h"
#include <stdlib.h>

struct event_context;

@@ -46,6 +46,7 @@ typedef void (*event_aio_handler_t)(struct event_context *, struct aio_event *,
struct event_context *event_context_init(TALLOC_CTX *mem_ctx);
struct event_context *event_context_init_byname(TALLOC_CTX *mem_ctx, const char *name);
const char **event_backend_list(TALLOC_CTX *mem_ctx);
void event_set_default_backend(const char *backend);

struct fd_event *event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
			      int fd, uint16_t flags, event_fd_handler_t handler,

@@ -79,6 +80,7 @@ struct event_context *event_context_find(TALLOC_CTX *mem_ctx);
/* bits for file descriptor event flags */
#define EVENT_FD_READ 1
#define EVENT_FD_WRITE 2
#define EVENT_FD_AUTOCLOSE 4

#define EVENT_FD_WRITEABLE(fde) \
	event_set_fd_flags(fde, event_get_fd_flags(fde) | EVENT_FD_WRITE)
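
A minimal sketch of how the new EVENT_FD_AUTOCLOSE flag is meant to be used, mirroring the ctdb_daemon.c and ctdb_io.c hunks above (watch_socket and its parameters are illustrative, not from the commit):

#include "lib/events/events.h"

static int watch_socket(struct event_context *ev, TALLOC_CTX *mem_ctx,
			int sock, event_fd_handler_t handler, void *private_data)
{
	struct fd_event *fde;

	/* with EVENT_FD_AUTOCLOSE the backend's fd destructor closes sock
	   when fde is freed, so the caller never calls close() itself */
	fde = event_add_fd(ev, mem_ctx, sock,
			   EVENT_FD_READ|EVENT_FD_AUTOCLOSE,
			   handler, private_data);
	if (fde == NULL) {
		return -1;
	}
	return 0;	/* talloc_free(fde) later also closes sock */
}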
@@ -250,7 +250,8 @@ static int aio_event_loop(struct aio_event_context *aio_ev, struct timeval *tval
	}

	if (ret == 0 && tvalp) {
		common_event_loop_timer(aio_ev->ev);
		/* we don't care about a possible delay here */
		common_event_loop_timer_delay(aio_ev->ev);
		return 0;
	}

@@ -360,6 +361,11 @@ static int aio_event_fd_destructor(struct fd_event *fde)

	epoll_del_event(aio_ev, fde);

	if (fde->flags & EVENT_FD_AUTOCLOSE) {
		close(fde->fd);
		fde->fd = -1;
	}

	return 0;
}

@@ -431,10 +437,8 @@ static int aio_event_loop_once(struct event_context *ev)
					   struct aio_event_context);
	struct timeval tval;

	tval = common_event_loop_delay(ev);

	tval = common_event_loop_timer_delay(ev);
	if (timeval_is_zero(&tval)) {
		common_event_loop_timer(ev);
		return 0;
	}
@@ -136,7 +136,9 @@ static void epoll_del_event(struct epoll_event_context *epoll_ev, struct fd_even
	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event);
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event) != 0) {
		DEBUG(0,("epoll_del_event failed! probable early close bug (%s)\n", strerror(errno)));
	}
	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
}

@@ -202,7 +204,7 @@ static void epoll_change_event(struct epoll_event_context *epoll_ev, struct fd_e
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
	int ret, i;
#define MAXEVENTS 8
#define MAXEVENTS 32
	struct epoll_event events[MAXEVENTS];
	uint32_t destruction_count = ++epoll_ev->destruction_count;
	int timeout = -1;

@@ -233,7 +235,8 @@ static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval
	}

	if (ret == 0 && tvalp) {
		common_event_loop_timer(epoll_ev->ev);
		/* we don't care about a possible delay here */
		common_event_loop_timer_delay(epoll_ev->ev);
		return 0;
	}

@@ -305,6 +308,11 @@ static int epoll_event_fd_destructor(struct fd_event *fde)

	epoll_del_event(epoll_ev, fde);

	if (fde->flags & EVENT_FD_AUTOCLOSE) {
		close(fde->fd);
		fde->fd = -1;
	}

	return 0;
}

@@ -376,10 +384,8 @@ static int epoll_event_loop_once(struct event_context *ev)
					   struct epoll_event_context);
	struct timeval tval;

	tval = common_event_loop_delay(ev);

	tval = common_event_loop_timer_delay(ev);
	if (timeval_is_zero(&tval)) {
		common_event_loop_timer(ev);
		return 0;
	}
@@ -117,8 +117,7 @@ bool event_register_backend(const char *name, const struct event_ops *ops);

struct timed_event *common_event_add_timed(struct event_context *, TALLOC_CTX *,
					    struct timeval, event_timed_handler_t, void *);
void common_event_loop_timer(struct event_context *);
struct timeval common_event_loop_delay(struct event_context *);
struct timeval common_event_loop_timer_delay(struct event_context *);

struct signal_event *common_event_add_signal(struct event_context *ev,
					     TALLOC_CTX *mem_ctx,
@@ -101,6 +101,11 @@ static int oop_event_fd_destructor(struct fd_event *fde)
	if (fde->flags & EVENT_FD_WRITE)
		oop->cancel_fd(oop, fde->fd, OOP_WRITE);

	if (fde->flags & EVENT_FD_AUTOCLOSE) {
		close(fde->fd);
		fde->fd = -1;
	}

	return 0;
}
@@ -104,6 +104,11 @@ static int select_event_fd_destructor(struct fd_event *fde)
	DLIST_REMOVE(select_ev->fd_events, fde);
	select_ev->destruction_count++;

	if (fde->flags & EVENT_FD_AUTOCLOSE) {
		close(fde->fd);
		fde->fd = -1;
	}

	return 0;
}

@@ -218,7 +223,8 @@ static int select_event_loop_select(struct select_event_context *select_ev, stru
	}

	if (selrtn == 0 && tvalp) {
		common_event_loop_timer(select_ev->ev);
		/* we don't care about a possible delay here */
		common_event_loop_timer_delay(select_ev->ev);
		return 0;
	}

@@ -252,10 +258,8 @@ static int select_event_loop_once(struct event_context *ev)
					   struct select_event_context);
	struct timeval tval;

	tval = common_event_loop_delay(ev);

	tval = common_event_loop_timer_delay(ev);
	if (timeval_is_zero(&tval)) {
		common_event_loop_timer(ev);
		return 0;
	}
@@ -30,7 +30,7 @@

#include "includes.h"
#include "system/filesys.h"
#include "system/select.h" /* needed for WITH_EPOLL */
#include "system/select.h" /* needed for HAVE_EVENTS_EPOLL */
#include "lib/util/dlinklist.h"
#include "lib/events/events.h"
#include "lib/events/events_internal.h"

@@ -61,7 +61,7 @@ struct std_event_context {
};

/* use epoll if it is available */
#if WITH_EPOLL
#if HAVE_EVENTS_EPOLL
/*
  called when a epoll call fails, and we should fallback
  to using select

@@ -229,15 +229,15 @@ static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tv
		timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
	}

	if (epoll_ev->ev->num_signal_handlers &&
	    common_event_check_signal(epoll_ev->ev)) {
	if (std_ev->ev->num_signal_handlers &&
	    common_event_check_signal(std_ev->ev)) {
		return 0;
	}

	ret = epoll_wait(std_ev->epoll_fd, events, MAXEVENTS, timeout);

	if (ret == -1 && errno == EINTR && epoll_ev->ev->num_signal_handlers) {
		if (common_event_check_signal(epoll_ev->ev)) {
	if (ret == -1 && errno == EINTR && std_ev->ev->num_signal_handlers) {
		if (common_event_check_signal(std_ev->ev)) {
			return 0;
		}
	}

@@ -248,7 +248,8 @@ static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tv
	}

	if (ret == 0 && tvalp) {
		common_event_loop_timer(std_ev->ev);
		/* we don't care about a possible delay here */
		common_event_loop_timer_delay(std_ev->ev);
		return 0;
	}

@@ -352,6 +353,11 @@ static int std_event_fd_destructor(struct fd_event *fde)

	epoll_del_event(std_ev, fde);

	if (fde->flags & EVENT_FD_AUTOCLOSE) {
		close(fde->fd);
		fde->fd = -1;
	}

	return 0;
}

@@ -471,7 +477,8 @@ static int std_event_loop_select(struct std_event_context *std_ev, struct timeva
	}

	if (selrtn == 0 && tvalp) {
		common_event_loop_timer(std_ev->ev);
		/* we don't care about a possible delay here */
		common_event_loop_timer_delay(std_ev->ev);
		return 0;
	}

@@ -505,10 +512,8 @@ static int std_event_loop_once(struct event_context *ev)
					   struct std_event_context);
	struct timeval tval;

	tval = common_event_loop_delay(ev);

	tval = common_event_loop_timer_delay(ev);
	if (timeval_is_zero(&tval)) {
		common_event_loop_timer(ev);
		return 0;
	}
@@ -68,9 +68,7 @@ struct timed_event *common_event_add_timed(struct event_context *ev, TALLOC_CTX
	last_te = NULL;
	for (cur_te = ev->timed_events; cur_te; cur_te = cur_te->next) {
		/* if the new event comes before the current one break */
		if (!timeval_is_zero(&cur_te->next_event) &&
		    timeval_compare(&te->next_event,
				    &cur_te->next_event) < 0) {
		if (timeval_compare(&te->next_event, &cur_te->next_event) < 0) {
			break;
		}

@@ -85,17 +83,47 @@ struct timed_event *common_event_add_timed(struct event_context *ev, TALLOC_CTX
}

/*
  a timer has gone off - call it
  do a single event loop using the events defined in ev

  return the delay untill the next timed event,
  or zero if a timed event was triggered
*/
void common_event_loop_timer(struct event_context *ev)
struct timeval common_event_loop_timer_delay(struct event_context *ev)
{
	struct timeval t = timeval_current();
	struct timeval current_time = timeval_zero();
	struct timed_event *te = ev->timed_events;

	if (te == NULL) {
		return;
	if (!te) {
		/* have a default tick time of 30 seconds. This guarantees
		   that code that uses its own timeout checking will be
		   able to proceeed eventually */
		return timeval_set(30, 0);
	}

	/*
	 * work out the right timeout for the next timed event
	 *
	 * avoid the syscall to gettimeofday() if the timed event should
	 * be triggered directly
	 *
	 * if there's a delay till the next timed event, we're done
	 * with just returning the delay
	 */
	if (!timeval_is_zero(&te->next_event)) {
		struct timeval delay;

		current_time = timeval_current();

		delay = timeval_until(&current_time, &te->next_event);
		if (!timeval_is_zero(&delay)) {
			return delay;
		}
	}

	/*
	 * ok, we have a timed event that we'll process ...
	 */

	/* deny the handler to free the event */
	talloc_set_destructor(te, common_event_timed_deny_destructor);

@@ -104,33 +132,21 @@ void common_event_loop_timer(struct event_context *ev)
	 * handler we don't want to come across this event again -- vl */
	DLIST_REMOVE(ev->timed_events, te);

	te->handler(ev, te, t, te->private_data);
	/*
	 * If the timed event was registered for a zero current_time,
	 * then we pass a zero timeval here too! To avoid the
	 * overhead of gettimeofday() calls.
	 *
	 * otherwise we pass the current time
	 */
	te->handler(ev, te, current_time, te->private_data);

	/* The destructor isn't necessary anymore, we've already removed the
	 * event from the list. */
	talloc_set_destructor(te, NULL);

	talloc_free(te);
}

/*
  do a single event loop using the events defined in ev
*/
struct timeval common_event_loop_delay(struct event_context *ev)
{
	struct timeval tval;

	/* work out the right timeout for all timed events */
	if (ev->timed_events) {
		struct timeval t = timeval_current();
		tval = timeval_until(&t, &ev->timed_events->next_event);
	} else {
		/* have a default tick time of 30 seconds. This guarantees
		   that code that uses its own timeout checking will be
		   able to proceeed eventually */
		tval = timeval_set(30, 0);
	}

	return tval;

	return timeval_zero();
}
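
For context, the per-backend pattern this refactoring enables (a sketch, not part of the commit; example_loop_once and example_backend_wait are illustrative names): a zero delay returned by common_event_loop_timer_delay() means a timed event was already dispatched inside the call, otherwise the returned timeval is used as the poll timeout.

#include "lib/events/events.h"
#include "lib/events/events_internal.h"

static int example_backend_wait(struct event_context *ev, struct timeval *tvalp);

static int example_loop_once(struct event_context *ev)
{
	struct timeval tval;

	/* delay until the next timed event; zero if one already fired */
	tval = common_event_loop_timer_delay(ev);
	if (timeval_is_zero(&tval)) {
		return 0;
	}

	/* block in select()/epoll_wait()/io_getevents() for at most tval,
	   as the aio, epoll, select and standard backends do above */
	return example_backend_wait(ev, &tval);
}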
ctdb/lib/events/libevents.m4 (new file, 11 lines)
@@ -0,0 +1,11 @@
EVENTS_OBJ="lib/events/events.o lib/events/events_select.o lib/events/events_signal.o lib/events/events_timed.o lib/events/events_standard.o"

AC_CHECK_HEADERS(sys/epoll.h)
AC_CHECK_FUNCS(epoll_create)

if test x"$ac_cv_header_sys_epoll_h" = x"yes" -a x"$ac_cv_func_epoll_create" = x"yes"; then
	EVENTS_OBJ="$EVENTS_OBJ lib/events/events_epoll.o"
	AC_DEFINE(HAVE_EVENTS_EPOLL, 1, [Whether epoll available])
fi

AC_SUBST(EVENTS_OBJ)
@@ -26,6 +26,10 @@
#include <sys/select.h>
#endif

#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif

#ifndef SELECT_CAST
#define SELECT_CAST
#endif
@@ -19,8 +19,8 @@ EXEEXT = @EXEEXT@

.PHONY: test

PROGS = bin/tdbtool$(EXEEXT) bin/tdbtorture$(EXEEXT)
PROGS_NOINSTALL = bin/tdbtest$(EXEEXT) bin/tdbdump$(EXEEXT) bin/tdbbackup$(EXEEXT)
PROGS = bin/tdbtool$(EXEEXT) bin/tdbdump$(EXEEXT) bin/tdbbackup$(EXEEXT)
PROGS_NOINSTALL = bin/tdbtest$(EXEEXT) bin/tdbtorture$(EXEEXT)
ALL_PROGS = $(PROGS) $(PROGS_NOINSTALL)

TDB_OBJ = @TDB_OBJ@ @LIBREPLACEOBJ@
@@ -28,6 +28,8 @@

#include "tdb_private.h"

#define TDB_MARK_LOCK 0x80000000

/* a byte range locking function - return 0 on success
   this functions locks/unlocks 1 byte at the specified offset.

@@ -109,6 +111,9 @@ static int _tdb_lock(struct tdb_context *tdb, int list, int ltype, int op)
{
	struct tdb_lock_type *new_lck;
	int i;
	bool mark_lock = ((ltype & TDB_MARK_LOCK) == TDB_MARK_LOCK);

	ltype &= ~TDB_MARK_LOCK;

	/* a global lock allows us to avoid per chain locks */
	if (tdb->global_lock.count &&

@@ -158,7 +163,8 @@ static int _tdb_lock(struct tdb_context *tdb, int list, int ltype, int op)

	/* Since fcntl locks don't nest, we do a lock for the first one,
	   and simply bump the count for future ones */
	if (tdb->methods->tdb_brlock(tdb,FREELIST_TOP+4*list,ltype, op,
	if (!mark_lock &&
	    tdb->methods->tdb_brlock(tdb,FREELIST_TOP+4*list, ltype, op,
				     0, 1)) {
		return -1;
	}

@@ -200,6 +206,9 @@ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
	int ret = -1;
	int i;
	struct tdb_lock_type *lck = NULL;
	bool mark_lock = ((ltype & TDB_MARK_LOCK) == TDB_MARK_LOCK);

	ltype &= ~TDB_MARK_LOCK;

	/* a global lock allows us to avoid per chain locks */
	if (tdb->global_lock.count &&

@@ -244,8 +253,12 @@ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
	 * anyway.
	 */

	ret = tdb->methods->tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK,
				       F_SETLKW, 0, 1);
	if (mark_lock) {
		ret = 0;
	} else {
		ret = tdb->methods->tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK,
					       F_SETLKW, 0, 1);
	}
	tdb->num_locks--;

	/*

@@ -376,6 +389,18 @@ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
	return tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
}

/* mark a chain as locked without actually locking it. Warning! use with great caution! */
int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	return tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK | TDB_MARK_LOCK);
}

/* unmark a chain as locked without actually locking it. Warning! use with great caution! */
int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK | TDB_MARK_LOCK);
}

int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
@@ -2,7 +2,7 @@ AC_PREREQ(2.50)
AC_DEFUN([SMB_MODULE_DEFAULT], [echo -n ""])
AC_DEFUN([SMB_LIBRARY_ENABLE], [echo -n ""])
AC_DEFUN([SMB_ENABLE], [echo -n ""])
AC_INIT(tdb, 1.1)
AC_INIT(tdb, 1.1.0)
AC_CONFIG_SRCDIR([common/tdb.c])
AC_CONFIG_HEADER(include/config.h)
AC_LIBREPLACE_ALL_CHECKS
@@ -140,6 +140,8 @@ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key);
int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key);
int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key);
int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key);
int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key);
int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key);

/* Debug functions. Not used in production. */
void tdb_dump_all(struct tdb_context *tdb);
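
A sketch of the intended pairing of the new calls, mirroring lockwait_handler() in ctdb_lockwait.c above (the wrapper function and its arguments are illustrative, not from the commit): a child process holds the real fcntl chain lock, and the parent only marks the chain as locked so nested tdb operations in the callback see it as held, then unmarks it and reaps the child.

#include <signal.h>
#include <sys/wait.h>
#include "tdb.h"

static void run_callback_under_childs_lock(struct tdb_context *tdb, TDB_DATA key,
					   pid_t child, void (*callback)(void *),
					   void *private_data)
{
	tdb_chainlock_mark(tdb, key);	/* bump the lock count, no fcntl call */
	callback(private_data);		/* the child still holds the real lock */
	tdb_chainlock_unmark(tdb, key);	/* drop the mark, no fcntl unlock */

	kill(child, SIGKILL);		/* child exits; the kernel releases its lock */
	waitpid(child, NULL, 0);
}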