/* 
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell	2003-2005
   Copyright (C) Stefan Metzmacher	2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/*
  This is SAMBA's default event loop code

  - we try to use epoll if configure detected support for it
    otherwise we use select()
  - if epoll is broken on the system or the kernel doesn't support it
    at runtime we fallback to select()
*/
2008-04-25 01:28:30 +04:00
# include "replace.h"
2005-02-10 08:09:35 +03:00
# include "system/filesys.h"
2008-05-14 19:28:42 +04:00
# include "system/network.h"
2007-05-14 04:57:48 +04:00
# include "system/select.h" /* needed for HAVE_EVENTS_EPOLL */
2008-04-25 01:28:30 +04:00
# include "events.h"
2008-06-17 04:21:46 +04:00
# include "events_util.h"
2008-04-25 01:28:30 +04:00
# include "events_internal.h"
2003-08-13 05:53:07 +04:00
2005-02-15 13:36:59 +03:00
/* private state of the "standard" backend, hung off
   event_context->additional_data */
struct std_event_context {
	/* a pointer back to the generic event_context */
	struct event_context *ev;

	/* list of filedescriptor events */
	struct fd_event *fd_events;

	/* the maximum file descriptor number in fd_events */
	int maxfd;

	/* information for exiting from the event loop */
	int exit_code;

	/* this is changed by the destructors for the fd event
	   type. It is used to detect event destruction by event
	   handlers, which means the code that is calling the event
	   handler needs to assume that the linked list is no longer
	   valid
	*/
	uint32_t destruction_count;

	/* when using epoll this is the handle from epoll_create */
	int epoll_fd;

	/* our pid at the time the epoll_fd was created */
	pid_t pid;
};
2006-01-11 18:07:14 +03:00
/* use epoll if it is available */
2007-05-14 04:57:48 +04:00
# if HAVE_EVENTS_EPOLL
2005-02-10 06:16:33 +03:00
/*
2005-12-09 20:30:14 +03:00
called when a epoll call fails , and we should fallback
to using select
2005-02-10 06:16:33 +03:00
*/
2005-12-09 20:30:14 +03:00
static void epoll_fallback_to_select ( struct std_event_context * std_ev , const char * reason )
2005-02-10 06:16:33 +03:00
{
2008-06-14 19:23:31 +04:00
ev_debug ( std_ev - > ev , EV_DEBUG_FATAL ,
" %s (%s) - falling back to select() \n " ,
reason , strerror ( errno ) ) ;
2005-12-09 20:30:14 +03:00
close ( std_ev - > epoll_fd ) ;
std_ev - > epoll_fd = - 1 ;
talloc_set_destructor ( std_ev , NULL ) ;
}
/*
map from EVENT_FD_ * to EPOLLIN / EPOLLOUT
*/
static uint32_t epoll_map_flags ( uint16_t flags )
{
uint32_t ret = 0 ;
2005-12-09 20:49:48 +03:00
if ( flags & EVENT_FD_READ ) ret | = ( EPOLLIN | EPOLLERR | EPOLLHUP ) ;
if ( flags & EVENT_FD_WRITE ) ret | = ( EPOLLOUT | EPOLLERR | EPOLLHUP ) ;
2005-12-09 20:30:14 +03:00
return ret ;
}
/*
free the epoll fd
*/
2006-05-24 11:34:11 +04:00
static int epoll_ctx_destructor ( struct std_event_context * std_ev )
2005-12-09 20:30:14 +03:00
{
2007-05-17 06:18:29 +04:00
if ( std_ev - > epoll_fd ! = - 1 ) {
close ( std_ev - > epoll_fd ) ;
}
2005-12-09 20:30:14 +03:00
std_ev - > epoll_fd = - 1 ;
return 0 ;
}
/*
init the epoll fd
*/
2007-01-05 12:35:49 +03:00
static void epoll_init_ctx ( struct std_event_context * std_ev )
2005-12-09 20:30:14 +03:00
{
std_ev - > epoll_fd = epoll_create ( 64 ) ;
2007-05-17 06:18:29 +04:00
std_ev - > pid = getpid ( ) ;
2005-12-09 20:30:14 +03:00
talloc_set_destructor ( std_ev , epoll_ctx_destructor ) ;
}
2007-05-17 06:18:29 +04:00
static void epoll_add_event ( struct std_event_context * std_ev , struct fd_event * fde ) ;
/*
reopen the epoll handle when our pid changes
see http : //junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an
demonstration of why this is needed
*/
2007-05-17 11:52:33 +04:00
static void epoll_check_reopen ( struct std_event_context * std_ev )
2007-05-17 06:18:29 +04:00
{
struct fd_event * fde ;
2007-05-17 11:52:33 +04:00
if ( std_ev - > pid = = getpid ( ) ) {
return ;
}
2007-05-17 06:18:29 +04:00
close ( std_ev - > epoll_fd ) ;
std_ev - > epoll_fd = epoll_create ( 64 ) ;
if ( std_ev - > epoll_fd = = - 1 ) {
2008-06-14 19:23:31 +04:00
ev_debug ( std_ev - > ev , EV_DEBUG_FATAL ,
" Failed to recreate epoll handle after fork \n " ) ;
2007-05-17 06:18:29 +04:00
return ;
}
std_ev - > pid = getpid ( ) ;
for ( fde = std_ev - > fd_events ; fde ; fde = fde - > next ) {
epoll_add_event ( std_ev , fde ) ;
}
}
2005-12-09 23:42:09 +03:00
# define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT (1<<0)
# define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1)
# define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2)
2005-12-09 20:30:14 +03:00
/*
add the epoll event to the given fd_event
*/
static void epoll_add_event ( struct std_event_context * std_ev , struct fd_event * fde )
{
struct epoll_event event ;
if ( std_ev - > epoll_fd = = - 1 ) return ;
2005-12-09 23:42:09 +03:00
fde - > additional_flags & = ~ EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR ;
/* if we don't want events yet, don't add an epoll_event */
if ( fde - > flags = = 0 ) return ;
2005-12-09 20:30:14 +03:00
ZERO_STRUCT ( event ) ;
event . events = epoll_map_flags ( fde - > flags ) ;
event . data . ptr = fde ;
if ( epoll_ctl ( std_ev - > epoll_fd , EPOLL_CTL_ADD , fde - > fd , & event ) ! = 0 ) {
epoll_fallback_to_select ( std_ev , " EPOLL_CTL_ADD failed " ) ;
2005-02-10 06:16:33 +03:00
}
2005-12-09 23:42:09 +03:00
fde - > additional_flags | = EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT ;
/* only if we want to read we want to tell the event handler about errors */
if ( fde - > flags & EVENT_FD_READ ) {
fde - > additional_flags | = EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR ;
}
2005-12-09 20:30:14 +03:00
}
/*
2005-12-09 23:42:09 +03:00
delete the epoll event for given fd_event
2005-12-09 20:30:14 +03:00
*/
2005-12-09 23:42:09 +03:00
static void epoll_del_event ( struct std_event_context * std_ev , struct fd_event * fde )
2005-12-09 20:30:14 +03:00
{
struct epoll_event event ;
if ( std_ev - > epoll_fd = = - 1 ) return ;
2005-12-09 23:42:09 +03:00
fde - > additional_flags & = ~ EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR ;
/* if there's no epoll_event, we don't need to delete it */
if ( ! ( fde - > additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT ) ) return ;
2005-12-09 20:30:14 +03:00
ZERO_STRUCT ( event ) ;
event . events = epoll_map_flags ( fde - > flags ) ;
event . data . ptr = fde ;
epoll_ctl ( std_ev - > epoll_fd , EPOLL_CTL_DEL , fde - > fd , & event ) ;
2005-12-09 23:42:09 +03:00
fde - > additional_flags & = ~ EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT ;
2005-12-09 20:30:14 +03:00
}
/*
change the epoll event to the given fd_event
*/
2005-12-09 23:42:09 +03:00
static void epoll_mod_event ( struct std_event_context * std_ev , struct fd_event * fde )
2005-12-09 20:30:14 +03:00
{
struct epoll_event event ;
if ( std_ev - > epoll_fd = = - 1 ) return ;
2005-12-09 23:42:09 +03:00
fde - > additional_flags & = ~ EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR ;
2005-12-09 20:30:14 +03:00
ZERO_STRUCT ( event ) ;
2005-12-09 23:42:09 +03:00
event . events = epoll_map_flags ( fde - > flags ) ;
2005-12-09 20:30:14 +03:00
event . data . ptr = fde ;
if ( epoll_ctl ( std_ev - > epoll_fd , EPOLL_CTL_MOD , fde - > fd , & event ) ! = 0 ) {
epoll_fallback_to_select ( std_ev , " EPOLL_CTL_MOD failed " ) ;
}
2005-12-09 23:42:09 +03:00
/* only if we want to read we want to tell the event handler about errors */
if ( fde - > flags & EVENT_FD_READ ) {
fde - > additional_flags | = EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR ;
}
}
static void epoll_change_event ( struct std_event_context * std_ev , struct fd_event * fde )
{
2007-05-02 01:29:42 +04:00
bool got_error = ( fde - > additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR ) ;
bool want_read = ( fde - > flags & EVENT_FD_READ ) ;
bool want_write = ( fde - > flags & EVENT_FD_WRITE ) ;
2005-12-09 23:42:09 +03:00
if ( std_ev - > epoll_fd = = - 1 ) return ;
fde - > additional_flags & = ~ EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR ;
/* there's already an event */
if ( fde - > additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT ) {
if ( want_read | | ( want_write & & ! got_error ) ) {
epoll_mod_event ( std_ev , fde ) ;
return ;
}
/*
* if we want to match the select behavior , we need to remove the epoll_event
* when the caller isn ' t interested in events .
*
* this is because epoll reports EPOLLERR and EPOLLHUP , even without asking for them
*/
epoll_del_event ( std_ev , fde ) ;
return ;
}
/* there's no epoll_event attached to the fde */
if ( want_read | | ( want_write & & ! got_error ) ) {
epoll_add_event ( std_ev , fde ) ;
return ;
}
2005-12-09 20:30:14 +03:00
}
/*
  event loop handling using epoll

  waits for at most *tvalp (or forever when tvalp is NULL) and
  dispatches fd handlers. Returns 0 on success, -1 when epoll is
  unusable and the caller should use the select() path instead.
*/
static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tvalp)
{
	int ret, i;
#define MAXEVENTS 8
	struct epoll_event events[MAXEVENTS];
	/* snapshot the destruction counter so we notice when a handler
	   destroys events and the fde list can no longer be trusted */
	uint32_t destruction_count = ++std_ev->destruction_count;
	int timeout = -1;

	if (std_ev->epoll_fd == -1) return -1;

	if (tvalp) {
		/* it's better to trigger timed events a bit later than to early */
		timeout = ((tvalp->tv_usec+999)/1000) + (tvalp->tv_sec*1000);
	}

	if (std_ev->ev->num_signal_handlers && 
	    common_event_check_signal(std_ev->ev)) {
		return 0;
	}

	ret = epoll_wait(std_ev->epoll_fd, events, MAXEVENTS, timeout);

	/* EINTR may mean a signal arrived; give the signal handlers a chance */
	if (ret == -1 && errno == EINTR && std_ev->ev->num_signal_handlers) {
		if (common_event_check_signal(std_ev->ev)) {
			return 0;
		}
	}

	if (ret == -1 && errno != EINTR) {
		epoll_fallback_to_select(std_ev, "epoll_wait() failed");
		return -1;
	}

	if (ret == 0 && tvalp) {
		/* we don't care about a possible delay here */
		common_event_loop_timer_delay(std_ev->ev);
		return 0;
	}

	for (i=0;i<ret;i++) {
		struct fd_event *fde = talloc_get_type(events[i].data.ptr, 
						       struct fd_event);
		uint16_t flags = 0;

		if (fde == NULL) {
			epoll_fallback_to_select(std_ev, "epoll_wait() gave bad data");
			return -1;
		}
		if (events[i].events & (EPOLLHUP|EPOLLERR)) {
			fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
			/*
			 * if we only wait for EVENT_FD_WRITE, we should not tell the
			 * event handler about it, and remove the epoll_event,
			 * as we only report errors when waiting for read events,
			 * to match the select() behavior
			 */
			if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
				epoll_del_event(std_ev, fde);
				continue;
			}
			/* errors are delivered to the handler as a read event */
			flags |= EVENT_FD_READ;
		}
		if (events[i].events & EPOLLIN) flags |= EVENT_FD_READ;
		if (events[i].events & EPOLLOUT) flags |= EVENT_FD_WRITE;
		if (flags) {
			fde->handler(std_ev->ev, fde, flags, fde->private_data);
			if (destruction_count != std_ev->destruction_count) {
				/* a handler destroyed events; the fde
				   pointers in events[] may be stale */
				break;
			}
		}
	}

	return 0;
}
2005-12-09 20:30:14 +03:00
#else

/* epoll is not available on this platform: stub out all epoll helpers
   so that only the select() path is ever taken */
#define epoll_init_ctx(std_ev) 
#define epoll_add_event(std_ev,fde) 
#define epoll_del_event(std_ev,fde) 
#define epoll_change_event(std_ev,fde) 
#define epoll_event_loop(std_ev,tvalp) (-1)
#define epoll_check_reopen(std_ev) 
#endif
2005-02-10 06:16:33 +03:00
2003-08-13 05:53:07 +04:00
/*
2005-02-15 13:36:59 +03:00
create a std_event_context structure .
2003-08-13 05:53:07 +04:00
*/
2007-01-05 12:35:49 +03:00
static int std_event_context_init ( struct event_context * ev )
2003-08-13 05:53:07 +04:00
{
2005-02-15 13:36:59 +03:00
struct std_event_context * std_ev ;
2003-08-13 05:53:07 +04:00
2005-02-15 13:36:59 +03:00
std_ev = talloc_zero ( ev , struct std_event_context ) ;
if ( ! std_ev ) return - 1 ;
2005-12-09 20:30:14 +03:00
std_ev - > ev = ev ;
std_ev - > epoll_fd = - 1 ;
2003-08-13 05:53:07 +04:00
2007-01-05 12:35:49 +03:00
epoll_init_ctx ( std_ev ) ;
2005-02-03 11:24:08 +03:00
2005-02-15 13:36:59 +03:00
ev - > additional_data = std_ev ;
return 0 ;
}
2005-02-03 11:24:08 +03:00
2003-08-13 05:53:07 +04:00
/*
recalculate the maxfd
*/
2005-12-09 20:30:14 +03:00
static void calc_maxfd ( struct std_event_context * std_ev )
2003-08-13 05:53:07 +04:00
{
2005-02-15 13:36:59 +03:00
struct fd_event * fde ;
std_ev - > maxfd = 0 ;
for ( fde = std_ev - > fd_events ; fde ; fde = fde - > next ) {
if ( fde - > fd > std_ev - > maxfd ) {
std_ev - > maxfd = fde - > fd ;
2003-08-13 05:53:07 +04:00
}
}
}
2005-02-03 11:24:08 +03:00
2005-01-23 14:49:15 +03:00
/* to mark the ev->maxfd invalid
* this means we need to recalculate it
*/
# define EVENT_INVALID_MAXFD (-1)
2005-02-03 11:24:08 +03:00
/*
destroy an fd_event
*/
2006-05-24 11:34:11 +04:00
static int std_event_fd_destructor ( struct fd_event * fde )
2005-01-23 14:49:15 +03:00
{
2005-02-03 11:24:08 +03:00
struct event_context * ev = fde - > event_ctx ;
2005-02-15 13:36:59 +03:00
struct std_event_context * std_ev = talloc_get_type ( ev - > additional_data ,
struct std_event_context ) ;
2005-02-03 11:24:08 +03:00
2007-06-02 04:32:49 +04:00
epoll_check_reopen ( std_ev ) ;
2005-02-15 13:36:59 +03:00
if ( std_ev - > maxfd = = fde - > fd ) {
std_ev - > maxfd = EVENT_INVALID_MAXFD ;
2005-01-23 14:49:15 +03:00
}
2005-12-09 20:30:14 +03:00
2005-02-15 13:36:59 +03:00
DLIST_REMOVE ( std_ev - > fd_events , fde ) ;
std_ev - > destruction_count + + ;
2005-12-09 20:30:14 +03:00
2005-12-09 23:42:09 +03:00
epoll_del_event ( std_ev , fde ) ;
2005-12-09 20:30:14 +03:00
2007-05-14 04:57:48 +04:00
if ( fde - > flags & EVENT_FD_AUTOCLOSE ) {
close ( fde - > fd ) ;
fde - > fd = - 1 ;
}
2005-01-23 14:49:15 +03:00
return 0 ;
}
2004-07-23 10:40:49 +04:00
/*
add a fd based event
return NULL on failure ( memory allocation error )
*/
2005-02-15 13:36:59 +03:00
static struct fd_event * std_event_add_fd ( struct event_context * ev , TALLOC_CTX * mem_ctx ,
int fd , uint16_t flags ,
event_fd_handler_t handler ,
void * private_data )
2004-07-23 10:40:49 +04:00
{
2005-02-15 13:36:59 +03:00
struct std_event_context * std_ev = talloc_get_type ( ev - > additional_data ,
struct std_event_context ) ;
struct fd_event * fde ;
2005-02-03 05:35:52 +03:00
2007-05-17 11:52:33 +04:00
epoll_check_reopen ( std_ev ) ;
2005-02-15 13:36:59 +03:00
fde = talloc ( mem_ctx ? mem_ctx : ev , struct fd_event ) ;
if ( ! fde ) return NULL ;
2005-02-03 05:35:52 +03:00
2005-02-15 13:36:59 +03:00
fde - > event_ctx = ev ;
fde - > fd = fd ;
fde - > flags = flags ;
fde - > handler = handler ;
fde - > private_data = private_data ;
2005-12-09 19:43:19 +03:00
fde - > additional_flags = 0 ;
2005-02-15 13:36:59 +03:00
fde - > additional_data = NULL ;
2005-02-03 05:35:52 +03:00
2005-02-15 13:36:59 +03:00
DLIST_ADD ( std_ev - > fd_events , fde ) ;
2007-05-02 00:57:54 +04:00
if ( ( std_ev - > maxfd ! = EVENT_INVALID_MAXFD )
& & ( fde - > fd > std_ev - > maxfd ) ) {
2005-02-15 13:36:59 +03:00
std_ev - > maxfd = fde - > fd ;
2005-01-23 15:17:45 +03:00
}
2005-02-15 13:36:59 +03:00
talloc_set_destructor ( fde , std_event_fd_destructor ) ;
2005-12-09 20:30:14 +03:00
epoll_add_event ( std_ev , fde ) ;
2005-02-03 11:24:08 +03:00
2005-02-15 13:36:59 +03:00
return fde ;
2004-07-23 10:40:49 +04:00
}
2005-02-03 05:35:52 +03:00
/*
return the fd event flags
*/
2005-02-15 13:36:59 +03:00
static uint16_t std_event_get_fd_flags ( struct fd_event * fde )
2005-01-23 14:49:15 +03:00
{
2005-12-08 11:31:59 +03:00
return fde - > flags ;
2005-01-23 14:49:15 +03:00
}
2003-08-13 05:53:07 +04:00
/*
2005-02-03 05:35:52 +03:00
set the fd event flags
2003-08-13 05:53:07 +04:00
*/
2005-02-15 13:36:59 +03:00
static void std_event_set_fd_flags ( struct fd_event * fde , uint16_t flags )
2003-08-13 05:53:07 +04:00
{
2005-02-03 11:24:08 +03:00
struct event_context * ev ;
2005-02-15 13:36:59 +03:00
struct std_event_context * std_ev ;
2005-12-09 20:30:14 +03:00
if ( fde - > flags = = flags ) return ;
2005-02-03 11:24:08 +03:00
ev = fde - > event_ctx ;
2005-02-15 13:36:59 +03:00
std_ev = talloc_get_type ( ev - > additional_data , struct std_event_context ) ;
2005-12-09 20:30:14 +03:00
2005-12-08 11:31:59 +03:00
fde - > flags = flags ;
2005-12-09 23:42:09 +03:00
2007-05-17 11:52:33 +04:00
epoll_check_reopen ( std_ev ) ;
2005-12-09 23:42:09 +03:00
epoll_change_event ( std_ev , fde ) ;
2003-08-13 05:53:07 +04:00
}
2005-02-03 11:24:08 +03:00
/*
event loop handling using select ( )
*/
2005-12-09 20:30:14 +03:00
static int std_event_loop_select ( struct std_event_context * std_ev , struct timeval * tvalp )
2003-08-13 05:53:07 +04:00
{
2004-07-23 10:40:49 +04:00
fd_set r_fds , w_fds ;
2005-02-15 13:36:59 +03:00
struct fd_event * fde ;
2004-07-23 10:40:49 +04:00
int selrtn ;
2007-02-06 07:43:48 +03:00
uint32_t destruction_count = + + std_ev - > destruction_count ;
2003-08-13 05:53:07 +04:00
2005-02-03 11:24:08 +03:00
/* we maybe need to recalculate the maxfd */
2005-02-15 13:36:59 +03:00
if ( std_ev - > maxfd = = EVENT_INVALID_MAXFD ) {
2005-12-09 20:30:14 +03:00
calc_maxfd ( std_ev ) ;
2005-02-03 11:24:08 +03:00
}
2005-02-15 13:36:59 +03:00
2004-07-23 10:40:49 +04:00
FD_ZERO ( & r_fds ) ;
FD_ZERO ( & w_fds ) ;
/* setup any fd events */
2005-02-15 13:36:59 +03:00
for ( fde = std_ev - > fd_events ; fde ; fde = fde - > next ) {
if ( fde - > flags & EVENT_FD_READ ) {
FD_SET ( fde - > fd , & r_fds ) ;
2005-01-23 14:49:15 +03:00
}
2005-02-15 13:36:59 +03:00
if ( fde - > flags & EVENT_FD_WRITE ) {
FD_SET ( fde - > fd , & w_fds ) ;
2003-08-13 05:53:07 +04:00
}
2004-07-23 10:40:49 +04:00
}
2003-08-13 05:53:07 +04:00
2007-01-21 11:23:14 +03:00
if ( std_ev - > ev - > num_signal_handlers & &
common_event_check_signal ( std_ev - > ev ) ) {
return 0 ;
}
2005-02-15 13:36:59 +03:00
selrtn = select ( std_ev - > maxfd + 1 , & r_fds , & w_fds , NULL , tvalp ) ;
2007-01-21 11:23:14 +03:00
if ( selrtn = = - 1 & & errno = = EINTR & &
std_ev - > ev - > num_signal_handlers ) {
common_event_check_signal ( std_ev - > ev ) ;
return 0 ;
}
2005-02-03 07:02:48 +03:00
if ( selrtn = = - 1 & & errno = = EBADF ) {
/* the socket is dead! this should never
happen as the socket should have first been
made readable and that should have removed
the event , so this must be a bug . This is a
fatal error . */
2008-06-14 19:23:31 +04:00
ev_debug ( std_ev - > ev , EV_DEBUG_FATAL ,
" ERROR: EBADF on std_event_loop_once \n " ) ;
2005-02-15 13:36:59 +03:00
std_ev - > exit_code = EBADF ;
2005-02-03 07:02:48 +03:00
return - 1 ;
}
2005-02-03 11:24:08 +03:00
if ( selrtn = = 0 & & tvalp ) {
r22661: optimize the handling of directly triggered timed events:
- if someone adds a timed_event with a zero timeval
we now avoid serval gettimeofday() calls and the
event handler doesn't get the current time when it's
called, instead we also pass a zero timeval
- this also makes sure multiple timed events with a zero timeval
are processed in the order there're added.
the little benchmark shows that processing 2000000 directly timed events
is now much faster, while avoiding syscalls at all!
> time ./evtest (with the old code)
real 0m6.388s
user 0m1.740s
sys 0m4.632s
> time ./evtest (with the new code)
real 0m1.498s
user 0m1.496s
sys 0m0.004s
metze@SERNOX:~/devel/samba/4.0/samba4-ci/source> cat evtest.c
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>
#include <talloc.h>
#include <events.h>
static void dummy_fde_handler(struct event_context *ev_ctx, struct fd_event *fde,
uint16_t flags, void *private_data)
{
}
static void timeout_handler(struct event_context *ev, struct timed_event *te,
struct timeval tval, void *private_data)
{
uint32_t *countp = (uint32_t *)private_data;
(*countp)++;
if (*countp > 2000000) exit(0);
event_add_timed(ev, ev, tval, timeout_handler, countp);
}
int main(void)
{
struct event_context *ev;
struct timeval tval = { 0, 0 };
uint32_t count = 0;
ev = event_context_init(NULL);
event_add_fd(ev, ev, 0, 0, dummy_fde_handler, NULL);
event_add_timed(ev, ev, tval, timeout_handler, &count);
return event_loop_wait(ev);
}
(This used to be commit 4db64b4ce2320b88d648078cbf86385f6fb44f1f)
2007-05-04 13:22:52 +04:00
/* we don't care about a possible delay here */
common_event_loop_timer_delay ( std_ev - > ev ) ;
2005-02-03 11:24:08 +03:00
return 0 ;
2005-02-03 07:02:48 +03:00
}
2005-02-03 11:24:08 +03:00
2005-02-03 07:02:48 +03:00
if ( selrtn > 0 ) {
/* at least one file descriptor is ready - check
which ones and call the handler , being careful to allow
the handler to remove itself when called */
2005-02-15 13:36:59 +03:00
for ( fde = std_ev - > fd_events ; fde ; fde = fde - > next ) {
2005-02-03 07:02:48 +03:00
uint16_t flags = 0 ;
2005-02-15 13:36:59 +03:00
if ( FD_ISSET ( fde - > fd , & r_fds ) ) flags | = EVENT_FD_READ ;
if ( FD_ISSET ( fde - > fd , & w_fds ) ) flags | = EVENT_FD_WRITE ;
2005-02-03 07:02:48 +03:00
if ( flags ) {
2005-12-09 20:30:14 +03:00
fde - > handler ( std_ev - > ev , fde , flags , fde - > private_data ) ;
2005-02-15 13:36:59 +03:00
if ( destruction_count ! = std_ev - > destruction_count ) {
2005-02-03 07:02:48 +03:00
break ;
2003-08-13 05:53:07 +04:00
}
}
}
2004-07-23 10:40:49 +04:00
}
2003-08-13 05:53:07 +04:00
2004-09-11 19:05:49 +04:00
return 0 ;
2005-02-03 11:24:08 +03:00
}
/*
  do a single event loop using the events defined in ev

  first runs any timed events that are already due; if the next timer
  is not yet due, waits that long for fd events via epoll, falling
  back to select() when epoll is unavailable
*/
static int std_event_loop_once(struct event_context *ev)
{
	struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
							   struct std_event_context);
	struct timeval tval;

	/* run due timed events; tval is the delay until the next timer */
	tval = common_event_loop_timer_delay(ev);
	if (ev_timeval_is_zero(&tval)) {
		/* a timer fired (or is due right now) - don't block on fds */
		return 0;
	}

	epoll_check_reopen(std_ev);

	if (epoll_event_loop(std_ev, &tval) == 0) {
		return 0;
	}

	/* epoll is unavailable or has been disabled - use select() */
	return std_event_loop_select(std_ev, &tval);
}
/*
2005-02-15 13:36:59 +03:00
return on failure or ( with 0 ) if all fd events are removed
2004-07-23 10:40:49 +04:00
*/
2005-02-15 13:36:59 +03:00
static int std_event_loop_wait ( struct event_context * ev )
2004-07-23 10:40:49 +04:00
{
2005-02-15 13:36:59 +03:00
struct std_event_context * std_ev = talloc_get_type ( ev - > additional_data ,
struct std_event_context ) ;
std_ev - > exit_code = 0 ;
2004-07-23 10:40:49 +04:00
2005-02-15 13:36:59 +03:00
while ( std_ev - > fd_events & & std_ev - > exit_code = = 0 ) {
if ( std_event_loop_once ( ev ) ! = 0 ) {
2004-09-11 19:05:49 +04:00
break ;
}
2003-08-13 05:53:07 +04:00
}
2005-02-15 13:36:59 +03:00
return std_ev - > exit_code ;
}
/* the operations table registered below as the "standard" backend;
   timed and signal events use the common implementations */
static const struct event_ops std_event_ops = {
	.context_init	= std_event_context_init,
	.add_fd		= std_event_add_fd,
	.get_fd_flags	= std_event_get_fd_flags,
	.set_fd_flags	= std_event_set_fd_flags,
	.add_timed	= common_event_add_timed,
	.add_signal	= common_event_add_signal,
	.loop_once	= std_event_loop_once,
	.loop_wait	= std_event_loop_wait,
};
2007-01-05 12:35:49 +03:00
2007-05-02 01:29:42 +04:00
bool events_standard_init ( void )
2005-02-15 13:36:59 +03:00
{
2007-01-05 12:35:49 +03:00
return event_register_backend ( " standard " , & std_event_ops ) ;
2003-08-13 05:53:07 +04:00
}
2007-05-02 01:29:42 +04:00