/*
   Unix SMB/CIFS implementation.
   Samba internal messaging functions
   Copyright (C) Andrew Tridgell 2000
   Copyright (C) 2001 by Martin Pool
   Copyright (C) 2002 by Jeremy Allison
   Copyright (C) 2007 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/**
  @defgroup messages Internal messaging framework
  @{
  @file messages.c

  @brief Module for internal messaging between Samba daemons.

  The idea is that if a part of Samba wants to do communication with
  another Samba process then it will do a messaging_register() of a
  dispatch function, and use messaging_send() to send messages to
  that process.

  The dispatch function is given the server_id of the sender, and it
  can use that to reply via messaging_send().  See ping_message() for
  a simple example.

  @caution Dispatch functions must be able to cope with incoming
  messages on an *odd* byte boundary.

  This system doesn't have any inherent size limitations but is not
  very efficient for large messages or when messages are sent in very
  quick succession.
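
  As a rough usage sketch (pong_handler, msg_ctx and target below are
  illustrative names, not taken from a real caller), a process can set
  up a ping/pong exchange with another process like this:

  @code
    static void pong_handler(struct messaging_context *msg,
                             void *private_data, uint32_t msg_type,
                             struct server_id src, DATA_BLOB *data)
    {
            DEBUG(5, ("PONG from %s\n", procid_str_static(&src)));
    }

    ...
    messaging_register(msg_ctx, NULL, MSG_PONG, pong_handler);
    messaging_send_buf(msg_ctx, target, MSG_PING,
                       (const uint8 *)"hello", sizeof("hello"));
  @endcode

  The ping_message() handler below replies to MSG_PING with MSG_PONG,
  which is then delivered to pong_handler() in the sending process.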
*/

#include "includes.h"
#include "dbwrap.h"
#include "serverid.h"
#include "messages.h"

struct messaging_callback {
        struct messaging_callback *prev, *next;
        uint32 msg_type;
        void (*fn)(struct messaging_context *msg, void *private_data,
                   uint32_t msg_type,
                   struct server_id server_id, DATA_BLOB *data);
        void *private_data;
};

/****************************************************************************
 A useful function for testing the message system.
****************************************************************************/

static void ping_message(struct messaging_context *msg_ctx,
                         void *private_data,
                         uint32_t msg_type,
                         struct server_id src,
                         DATA_BLOB *data)
{
        const char *msg = data->data ? (const char *)data->data : "none";

        DEBUG(1, ("INFO: Received PING message from PID %s [%s]\n",
                  procid_str_static(&src), msg));

        messaging_send(msg_ctx, src, MSG_PONG, data);
}

/****************************************************************************
 Register/replace a dispatch function for a particular message type.
 JRA changed Dec 13 2006. Only one message handler now permitted per type.
 *NOTE*: Dispatch functions must be able to cope with incoming
 messages on an *odd* byte boundary.
****************************************************************************/

struct msg_all {
        struct messaging_context *msg_ctx;
        int msg_type;
        uint32 msg_flag;
        const void *buf;
        size_t len;
        int n_sent;
};

/****************************************************************************
 Send one of the messages for the broadcast.
****************************************************************************/

static int traverse_fn(struct db_record *rec, const struct server_id *id,
                       uint32_t msg_flags, void *state)
{
        struct msg_all *msg_all = (struct msg_all *)state;
        NTSTATUS status;

        /* Don't send if the receiver hasn't registered an interest. */

        if ((msg_flags & msg_all->msg_flag) == 0) {
                return 0;
        }

        /* If the msg send fails because the pid was not found (i.e. smbd died),
         * the msg has already been deleted from the messages.tdb. */

        status = messaging_send_buf(msg_all->msg_ctx, *id, msg_all->msg_type,
                                    (const uint8 *)msg_all->buf, msg_all->len);

        if (NT_STATUS_EQUAL(status, NT_STATUS_INVALID_HANDLE)) {

                /* If the pid was not found delete the entry from connections.tdb */

                DEBUG(2, ("pid %s doesn't exist\n", procid_str_static(id)));

                rec->delete_rec(rec);
        }
        msg_all->n_sent++;
        return 0;
}

/**
 * Send a message to all smbd processes.
 *
 * It isn't very efficient, but should be OK for the sorts of
 * applications that use it. When we need efficient broadcast we can add
 * it.
 *
 * @param n_sent Set to the number of messages sent.  This should be
 * equal to the number of processes, but be careful for races.
 *
 * @retval True for success.
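 *
 * A minimal, illustrative call (assuming msg_ctx came from
 * messaging_init()) looks like:
 *
 * @code
 *   int n = 0;
 *   if (message_send_all(msg_ctx, MSG_PING, NULL, 0, &n)) {
 *           DEBUG(5, ("pinged %d registered processes\n", n));
 *   }
 * @endcode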
 **/
bool message_send_all(struct messaging_context *msg_ctx,
                      int msg_type,
                      const void *buf, size_t len,
                      int *n_sent)
{
        struct msg_all msg_all;

        msg_all.msg_type = msg_type;
        if (msg_type < 1000)
                msg_all.msg_flag = FLAG_MSG_GENERAL;
        else if (msg_type > 1000 && msg_type < 2000)
                msg_all.msg_flag = FLAG_MSG_NMBD;
        else if (msg_type > 2000 && msg_type < 2100)
                msg_all.msg_flag = FLAG_MSG_PRINT_NOTIFY;
        else if (msg_type > 2100 && msg_type < 3000)
                msg_all.msg_flag = FLAG_MSG_PRINT_GENERAL;
        else if (msg_type > 3000 && msg_type < 4000)
                msg_all.msg_flag = FLAG_MSG_SMBD;
        else if (msg_type > 4000 && msg_type < 5000)
                msg_all.msg_flag = FLAG_MSG_DBWRAP;
        else
                return False;

        msg_all.buf = buf;
        msg_all.len = len;
        msg_all.n_sent = 0;
        msg_all.msg_ctx = msg_ctx;

        serverid_traverse(traverse_fn, &msg_all);
        if (n_sent)
                *n_sent = msg_all.n_sent;
        return True;
}

struct event_context *messaging_event_context(struct messaging_context *msg_ctx)
{
        return msg_ctx->event_ctx;
}

struct messaging_context *messaging_init(TALLOC_CTX *mem_ctx,
                                         struct server_id server_id,
                                         struct event_context *ev)
{
        struct messaging_context *ctx;
        NTSTATUS status;

        if (!(ctx = TALLOC_ZERO_P(mem_ctx, struct messaging_context))) {
                return NULL;
        }

        ctx->id = server_id;
        ctx->event_ctx = ev;

        status = messaging_tdb_init(ctx, ctx, &ctx->local);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(2, ("messaging_tdb_init failed: %s\n",
                          nt_errstr(status)));
                TALLOC_FREE(ctx);
                return NULL;
        }

#ifdef CLUSTER_SUPPORT
        if (lp_clustering()) {
                status = messaging_ctdbd_init(ctx, ctx, &ctx->remote);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(2, ("messaging_ctdb_init failed: %s\n",
                                  nt_errstr(status)));
                        TALLOC_FREE(ctx);
                        return NULL;
                }
        }
        ctx->id.vnn = get_my_vnn();
#endif

        messaging_register(ctx, NULL, MSG_PING, ping_message);

        /* Register some debugging related messages */
        register_msg_pool_usage(ctx);
        register_dmalloc_msgs(ctx);
        debug_register_msgs(ctx);

        return ctx;
}

struct server_id messaging_server_id(const struct messaging_context *msg_ctx)
{
        return msg_ctx->id;
}

/*
 * re-init after a fork
 */
NTSTATUS messaging_reinit(struct messaging_context *msg_ctx,
                          struct server_id id)
{
        NTSTATUS status;

        TALLOC_FREE(msg_ctx->local);

        msg_ctx->id = id;

        status = messaging_tdb_init(msg_ctx, msg_ctx, &msg_ctx->local);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(0, ("messaging_tdb_init failed: %s\n",
                          nt_errstr(status)));
                return status;
        }

#ifdef CLUSTER_SUPPORT
        TALLOC_FREE(msg_ctx->remote);

        if (lp_clustering()) {
                status = messaging_ctdbd_init(msg_ctx, msg_ctx,
                                              &msg_ctx->remote);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(1, ("messaging_ctdb_init failed: %s\n",
                                  nt_errstr(status)));
                        return status;
                }
        }
#endif

        return NT_STATUS_OK;
}

/*
 * Register a dispatch function for a particular message type. Allow multiple
 * registrants.
 */
NTSTATUS messaging_register(struct messaging_context *msg_ctx,
                            void *private_data,
                            uint32_t msg_type,
                            void (*fn)(struct messaging_context *msg,
                                       void *private_data,
                                       uint32_t msg_type,
                                       struct server_id server_id,
                                       DATA_BLOB *data))
{
        struct messaging_callback *cb;

        /*
         * Only one callback per (msg_type, private_data) pair
         */

        for (cb = msg_ctx->callbacks; cb != NULL; cb = cb->next) {
                /* We allow a second registration of the same message
                   type if it has a different private pointer. This is
                   needed in, for example, the internal notify code,
                   which creates a new notify context for each tree
                   connect, and expects to receive messages to each of
                   them. */
                if (cb->msg_type == msg_type && private_data == cb->private_data) {
                        DEBUG(5, ("Overriding messaging pointer for type %u - private_data=%p\n",
                                  (unsigned)msg_type, private_data));
                        cb->fn = fn;
                        cb->private_data = private_data;
                        return NT_STATUS_OK;
                }
        }

        if (!(cb = talloc(msg_ctx, struct messaging_callback))) {
                return NT_STATUS_NO_MEMORY;
        }

        cb->msg_type = msg_type;
        cb->fn = fn;
        cb->private_data = private_data;

        DLIST_ADD(msg_ctx->callbacks, cb);
        return NT_STATUS_OK;
}
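
/*
 * Illustrative sketch only (notify_ctx1, notify_ctx2 and some_handler are
 * made-up names): two registrations of the same message type with different
 * private pointers coexist, and messaging_dispatch_rec() below will invoke
 * the handler once for each of them:
 *
 *   messaging_register(msg_ctx, notify_ctx1, MSG_PONG, some_handler);
 *   messaging_register(msg_ctx, notify_ctx2, MSG_PONG, some_handler);
 */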

/*
  De-register the function for a particular message type.
*/
void messaging_deregister(struct messaging_context *ctx, uint32_t msg_type,
                          void *private_data)
{
        struct messaging_callback *cb, *next;

        for (cb = ctx->callbacks; cb; cb = next) {
                next = cb->next;
                if ((cb->msg_type == msg_type)
                    && (cb->private_data == private_data)) {
                        DEBUG(5, ("Deregistering messaging pointer for type %u - private_data=%p\n",
                                  (unsigned)msg_type, private_data));
                        DLIST_REMOVE(ctx->callbacks, cb);
                        TALLOC_FREE(cb);
                }
        }
}

/*
  Send a message to a particular server
*/
NTSTATUS messaging_send(struct messaging_context *msg_ctx,
                        struct server_id server, uint32_t msg_type,
                        const DATA_BLOB *data)
{
#ifdef CLUSTER_SUPPORT
        if (!procid_is_local(&server)) {
                return msg_ctx->remote->send_fn(msg_ctx, server,
                                                msg_type, data,
                                                msg_ctx->remote);
        }
#endif

        return msg_ctx->local->send_fn(msg_ctx, server, msg_type, data,
                                       msg_ctx->local);
}

NTSTATUS messaging_send_buf(struct messaging_context *msg_ctx,
                            struct server_id server, uint32_t msg_type,
                            const uint8 *buf, size_t len)
{
        DATA_BLOB blob = data_blob_const(buf, len);
        return messaging_send(msg_ctx, server, msg_type, &blob);
}

/*
  Dispatch one messaging_rec
*/
void messaging_dispatch_rec(struct messaging_context *msg_ctx,
                            struct messaging_rec *rec)
{
        struct messaging_callback *cb, *next;

        for (cb = msg_ctx->callbacks; cb != NULL; cb = next) {
                next = cb->next;
                if (cb->msg_type == rec->msg_type) {
                        cb->fn(msg_ctx, cb->private_data, rec->msg_type,
                               rec->src, &rec->buf);
                        /* We continue looking for matching messages
                           after finding one. This matters for
                           subsystems like the internal notify code
                           which register more than one handler for
                           the same message type. */
                }
        }
        return;
}

/** @} **/