/*
   Unix SMB/CIFS implementation.

   helper functions for stream based servers

   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan (metze) Metzmacher 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
# include "includes.h"
2009-01-03 17:24:31 +03:00
# include <tevent.h>
2005-01-30 03:55:49 +03:00
# include "process_model.h"
2005-06-05 10:53:07 +04:00
# include "lib/messaging/irpc.h"
2007-01-10 13:52:09 +03:00
# include "cluster/cluster.h"
2007-09-08 16:42:09 +04:00
# include "param/param.h"
2009-12-23 13:48:06 +03:00
# include "../lib/tsocket/tsocket.h"
2011-05-12 14:33:01 +04:00
# include "lib/util/util_net.h"
2005-01-30 03:55:49 +03:00
/* the range of ports to try for dcerpc over tcp endpoints */
# define SERVER_TCP_LOW_PORT 1024
# define SERVER_TCP_HIGH_PORT 1300
/* size of listen() backlog in smbd */
# define SERVER_LISTEN_BACKLOG 10
/*
  private structure for a single listening stream socket
*/
struct stream_socket {
	/* protocol-specific callbacks (accept_connection, name, ...) */
	const struct stream_server_ops *ops;
	/* configuration context, referenced from this socket */
	struct loadparm_context *lp_ctx;
	/* event context the listening fd is registered on */
	struct tevent_context *event_ctx;
	/* process model callbacks used to spawn a handler per connection */
	const struct model_ops *model_ops;
	/* the listening socket itself */
	struct socket_context *sock;
	/* opaque caller data, handed on to each new stream_connection */
	void *private_data;
};
/*
  close the socket and shutdown a stream_connection

  If the connection is currently inside stream_io_handler() (i.e.
  srv_conn->processing is non-zero), the actual teardown is deferred:
  we only record the reason and disable all fd events, and the handler
  finishes the termination when it unwinds.  Otherwise the fd event,
  messaging context and finally the connection itself are torn down here.
*/
void stream_terminate_connection(struct stream_connection *srv_conn, const char *reason)
{
	struct tevent_context *event_ctx = srv_conn->event.ctx;
	const struct model_ops *model_ops = srv_conn->model_ops;

	if (!reason) reason = "unknown reason";

	DEBUG(3,("Terminating connection - '%s'\n", reason));

	srv_conn->terminate = reason;

	if (srv_conn->processing) {
		/*
		 * if we're currently inside the stream_io_handler(),
		 * defer the termination to the end of stream_io_handler()
		 *
		 * and we don't want to read or write to the connection...
		 */
		tevent_fd_set_flags(srv_conn->event.fde, 0);
		return;
	}

	/* drop the fd event first so no further IO callbacks can fire */
	talloc_free(srv_conn->event.fde);
	srv_conn->event.fde = NULL;

	/* deregister from the irpc/messaging system before the final free */
	imessaging_cleanup(srv_conn->msg_ctx);

	/* let the process model do its per-connection shutdown (may not return
	 * in some process models) */
	model_ops->terminate(event_ctx, srv_conn->lp_ctx, reason);

	talloc_free(srv_conn);
}
2007-12-04 02:12:03 +03:00
/**
2005-01-30 03:55:49 +03:00
the select loop has indicated that a stream is ready for IO
*/
2006-07-25 12:00:30 +04:00
static void stream_io_handler ( struct stream_connection * conn , uint16_t flags )
2005-01-30 03:55:49 +03:00
{
2008-06-19 11:59:57 +04:00
conn - > processing + + ;
2009-01-03 17:24:31 +03:00
if ( flags & TEVENT_FD_WRITE ) {
2005-02-03 14:25:52 +03:00
conn - > ops - > send_handler ( conn , flags ) ;
2009-01-03 17:24:31 +03:00
} else if ( flags & TEVENT_FD_READ ) {
2005-12-08 12:13:28 +03:00
conn - > ops - > recv_handler ( conn , flags ) ;
2005-01-30 03:55:49 +03:00
}
2008-06-19 11:59:57 +04:00
conn - > processing - - ;
2005-01-30 03:55:49 +03:00
2005-12-08 12:13:28 +03:00
if ( conn - > terminate ) {
stream_terminate_connection ( conn , conn - > terminate ) ;
2005-01-30 03:55:49 +03:00
}
}
2010-02-04 19:03:04 +03:00
void stream_io_handler_fde ( struct tevent_context * ev , struct tevent_fd * fde ,
2009-02-02 10:41:28 +03:00
uint16_t flags , void * private_data )
2006-07-25 12:00:30 +04:00
{
2009-02-02 10:41:28 +03:00
struct stream_connection * conn = talloc_get_type ( private_data ,
2006-07-25 12:00:30 +04:00
struct stream_connection ) ;
stream_io_handler ( conn , flags ) ;
}
2009-02-02 10:41:28 +03:00
void stream_io_handler_callback ( void * private_data , uint16_t flags )
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
{
2009-02-02 10:41:28 +03:00
struct stream_connection * conn = talloc_get_type ( private_data ,
2006-07-25 12:00:30 +04:00
struct stream_connection ) ;
stream_io_handler ( conn , flags ) ;
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
}
2005-10-06 18:44:37 +04:00
/*
this creates a stream_connection from an already existing connection ,
used for protocols , where a client connection needs to switched into
a server connection
*/
2008-12-29 22:24:57 +03:00
NTSTATUS stream_new_connection_merge ( struct tevent_context * ev ,
2008-05-17 02:01:05 +04:00
struct loadparm_context * lp_ctx ,
2005-10-06 18:44:37 +04:00
const struct model_ops * model_ops ,
const struct stream_server_ops * stream_ops ,
2011-05-03 04:40:33 +04:00
struct imessaging_context * msg_ctx ,
2005-10-06 18:44:37 +04:00
void * private_data ,
struct stream_connection * * _srv_conn )
{
struct stream_connection * srv_conn ;
srv_conn = talloc_zero ( ev , struct stream_connection ) ;
NT_STATUS_HAVE_NO_MEMORY ( srv_conn ) ;
2009-02-02 12:30:03 +03:00
srv_conn - > private_data = private_data ;
2005-10-06 18:44:37 +04:00
srv_conn - > model_ops = model_ops ;
2010-03-05 20:30:10 +03:00
srv_conn - > socket = NULL ;
2008-02-04 09:51:38 +03:00
srv_conn - > server_id = cluster_id ( 0 , 0 ) ;
2005-10-06 18:44:37 +04:00
srv_conn - > ops = stream_ops ;
srv_conn - > msg_ctx = msg_ctx ;
srv_conn - > event . ctx = ev ;
2008-05-17 02:01:05 +04:00
srv_conn - > lp_ctx = lp_ctx ;
2010-03-05 20:30:10 +03:00
srv_conn - > event . fde = NULL ;
2009-01-03 17:24:31 +03:00
2005-10-06 18:44:37 +04:00
* _srv_conn = srv_conn ;
return NT_STATUS_OK ;
}
/*
  called when a new socket connection has been established. This is called in the process
  context of the new process (if appropriate)

  On any setup failure the partially constructed connection is torn down
  via stream_terminate_connection() and we simply return — there is no
  status code to propagate to the process model.
*/
static void stream_new_connection(struct tevent_context *ev,
				  struct loadparm_context *lp_ctx,
				  struct socket_context *sock,
				  struct server_id server_id, void *private_data)
{
	struct stream_socket *stream_socket = talloc_get_type(private_data, struct stream_socket);
	struct stream_connection *srv_conn;

	srv_conn = talloc_zero(ev, struct stream_connection);
	if (!srv_conn) {
		DEBUG(0,("talloc(mem_ctx, struct stream_connection) failed\n"));
		return;
	}

	/* the connection now owns the accepted socket */
	talloc_steal(srv_conn, sock);

	srv_conn->private_data	= stream_socket->private_data;
	srv_conn->model_ops	= stream_socket->model_ops;
	srv_conn->socket	= sock;
	srv_conn->server_id	= server_id;
	srv_conn->ops		= stream_socket->ops;
	srv_conn->event.ctx	= ev;
	srv_conn->lp_ctx	= lp_ctx;

	/* enforce the "hosts allow"/"hosts deny" access rules before any IO */
	if (!socket_check_access(sock, "smbd", lpcfg_hostsallow(NULL, lpcfg_default_service(lp_ctx)), lpcfg_hostsdeny(NULL, lpcfg_default_service(lp_ctx)))) {
		stream_terminate_connection(srv_conn, "denied by access rules");
		return;
	}

	/* register the fd with no events yet; reads are enabled only once
	 * the connection is fully set up (see TEVENT_FD_READABLE below) */
	srv_conn->event.fde	= tevent_add_fd(ev, srv_conn, socket_get_fd(sock),
						0, stream_io_handler_fde, srv_conn);
	if (!srv_conn->event.fde) {
		stream_terminate_connection(srv_conn, "tevent_add_fd() failed");
		return;
	}

	/* setup to receive internal messages on this connection */
	srv_conn->msg_ctx = imessaging_init(srv_conn,
					    lp_ctx,
					    srv_conn->server_id,
					    ev, false);
	if (!srv_conn->msg_ctx) {
		stream_terminate_connection(srv_conn, "imessaging_init() failed");
		return;
	}

	/* cache the endpoint addresses, allocated on the connection */
	srv_conn->remote_address = socket_get_remote_addr(srv_conn->socket, srv_conn);
	if (!srv_conn->remote_address) {
		stream_terminate_connection(srv_conn, "socket_get_remote_addr() failed");
		return;
	}

	srv_conn->local_address = socket_get_local_addr(srv_conn->socket, srv_conn);
	if (!srv_conn->local_address) {
		stream_terminate_connection(srv_conn, "socket_get_local_addr() failed");
		return;
	}

	/* build a human-readable title for this connection; a failure to
	 * allocate the title is non-fatal */
	{
		TALLOC_CTX *tmp_ctx;
		const char *title;

		tmp_ctx = talloc_new(srv_conn);

		title = talloc_asprintf(tmp_ctx, "conn[%s] c[%s] s[%s] server_id[%s]",
					stream_socket->ops->name,
					tsocket_address_string(srv_conn->remote_address, tmp_ctx),
					tsocket_address_string(srv_conn->local_address, tmp_ctx),
					server_id_str(tmp_ctx, &server_id));
		if (title) {
			stream_connection_set_title(srv_conn, title);
		}
		talloc_free(tmp_ctx);
	}

	/* we're now ready to start receiving events on this stream */
	TEVENT_FD_READABLE(srv_conn->event.fde);

	/* call the server specific accept code */
	stream_socket->ops->accept_connection(srv_conn);
}
/*
called when someone opens a connection to one of our listening ports
*/
2008-12-29 22:24:57 +03:00
static void stream_accept_handler ( struct tevent_context * ev , struct tevent_fd * fde ,
2009-02-02 10:41:28 +03:00
uint16_t flags , void * private_data )
2005-01-30 03:55:49 +03:00
{
2009-02-02 10:41:28 +03:00
struct stream_socket * stream_socket = talloc_get_type ( private_data , struct stream_socket ) ;
2005-01-30 03:55:49 +03:00
/* ask the process model to create us a process for this new
connection . When done , it calls stream_new_connection ( )
with the newly created socket */
2010-07-16 08:32:42 +04:00
stream_socket - > model_ops - > accept_connection ( ev , stream_socket - > lp_ctx ,
2008-01-06 04:03:43 +03:00
stream_socket - > sock ,
2005-01-30 03:55:49 +03:00
stream_new_connection , stream_socket ) ;
}
/*
setup a listen stream socket
if you pass * port = = 0 , then a port > 1024 is used
2007-09-03 17:13:25 +04:00
FIXME : This function is TCP / IP specific - uses an int rather than
a string for the port . Should leave allocating a port nr
to the socket implementation - JRV20070903
2005-01-30 03:55:49 +03:00
*/
2010-11-15 02:12:22 +03:00
NTSTATUS stream_setup_socket ( TALLOC_CTX * mem_ctx ,
struct tevent_context * event_context ,
2008-01-06 04:03:43 +03:00
struct loadparm_context * lp_ctx ,
2005-01-30 03:55:49 +03:00
const struct model_ops * model_ops ,
const struct stream_server_ops * stream_ops ,
const char * family ,
const char * sock_addr ,
uint16_t * port ,
2007-12-06 18:54:34 +03:00
const char * socket_options ,
2009-02-02 10:41:28 +03:00
void * private_data )
2005-01-30 03:55:49 +03:00
{
NTSTATUS status ;
struct stream_socket * stream_socket ;
2006-01-10 01:12:53 +03:00
struct socket_address * socket_address ;
2009-01-03 14:47:24 +03:00
struct tevent_fd * fde ;
2005-01-30 03:55:49 +03:00
int i ;
2011-05-12 14:33:01 +04:00
struct sockaddr_storage ss ;
2005-01-30 03:55:49 +03:00
2010-11-15 02:12:22 +03:00
stream_socket = talloc_zero ( mem_ctx , struct stream_socket ) ;
2005-01-30 03:55:49 +03:00
NT_STATUS_HAVE_NO_MEMORY ( stream_socket ) ;
2011-05-12 14:33:01 +04:00
if ( strcmp ( family , " ip " ) = = 0 ) {
/* we will get the real family from the address itself */
if ( ! interpret_string_addr ( & ss , sock_addr , 0 ) ) {
talloc_free ( stream_socket ) ;
return NT_STATUS_INVALID_ADDRESS ;
}
socket_address = socket_address_from_sockaddr_storage ( stream_socket , & ss , port ? * port : 0 ) ;
NT_STATUS_HAVE_NO_MEMORY_AND_FREE ( socket_address , stream_socket ) ;
status = socket_create ( socket_address - > family , SOCKET_TYPE_STREAM , & stream_socket - > sock , 0 ) ;
NT_STATUS_NOT_OK_RETURN ( status ) ;
} else {
status = socket_create ( family , SOCKET_TYPE_STREAM , & stream_socket - > sock , 0 ) ;
NT_STATUS_NOT_OK_RETURN ( status ) ;
/* this is for non-IP sockets, eg. unix domain sockets */
socket_address = socket_address_from_strings ( stream_socket ,
stream_socket - > sock - > backend_name ,
2011-06-03 11:57:57 +04:00
sock_addr , port ? * port : 0 ) ;
2011-05-12 14:33:01 +04:00
NT_STATUS_HAVE_NO_MEMORY ( socket_address ) ;
}
2005-01-30 03:55:49 +03:00
talloc_steal ( stream_socket , stream_socket - > sock ) ;
2008-01-06 04:03:43 +03:00
stream_socket - > lp_ctx = talloc_reference ( stream_socket , lp_ctx ) ;
2005-01-30 03:55:49 +03:00
/* ready to listen */
2005-01-30 13:24:36 +03:00
status = socket_set_option ( stream_socket - > sock , " SO_KEEPALIVE " , NULL ) ;
2005-01-30 03:55:49 +03:00
NT_STATUS_NOT_OK_RETURN ( status ) ;
2007-12-06 18:54:34 +03:00
if ( socket_options ! = NULL ) {
status = socket_set_option ( stream_socket - > sock , socket_options , NULL ) ;
NT_STATUS_NOT_OK_RETURN ( status ) ;
}
2005-01-30 03:55:49 +03:00
2008-02-04 09:51:38 +03:00
/* TODO: set socket ACL's (host allow etc) here when they're
* implemented */
2005-01-30 03:55:49 +03:00
2008-02-04 09:51:38 +03:00
/* Some sockets don't have a port, or are just described from
* the string . We are indicating this by having port = = NULL */
2007-11-10 07:48:38 +03:00
if ( ! port ) {
status = socket_listen ( stream_socket - > sock , socket_address , SERVER_LISTEN_BACKLOG , 0 ) ;
} else if ( * port = = 0 ) {
2005-01-30 03:55:49 +03:00
for ( i = SERVER_TCP_LOW_PORT ; i < = SERVER_TCP_HIGH_PORT ; i + + ) {
2011-05-12 14:33:01 +04:00
socket_address - > port = i ;
2006-01-10 01:12:53 +03:00
status = socket_listen ( stream_socket - > sock , socket_address ,
2005-01-30 03:55:49 +03:00
SERVER_LISTEN_BACKLOG , 0 ) ;
if ( NT_STATUS_IS_OK ( status ) ) {
* port = i ;
break ;
}
}
} else {
2006-01-10 01:12:53 +03:00
status = socket_listen ( stream_socket - > sock , socket_address , SERVER_LISTEN_BACKLOG , 0 ) ;
2005-01-30 03:55:49 +03:00
}
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " Failed to listen on %s:%u - %s \n " ,
2009-05-02 15:12:57 +04:00
sock_addr , port ? ( unsigned int ) ( * port ) : 0 ,
nt_errstr ( status ) ) ) ;
2005-01-30 03:55:49 +03:00
talloc_free ( stream_socket ) ;
return status ;
}
2008-02-04 09:51:38 +03:00
/* Add the FD from the newly created socket into the event
* subsystem . it will call the accept handler whenever we get
* new connections */
2009-01-03 14:47:24 +03:00
fde = tevent_add_fd ( event_context , stream_socket - > sock ,
socket_get_fd ( stream_socket - > sock ) ,
TEVENT_FD_READ ,
stream_accept_handler , stream_socket ) ;
if ( ! fde ) {
DEBUG ( 0 , ( " Failed to setup fd event \n " ) ) ;
talloc_free ( stream_socket ) ;
return NT_STATUS_NO_MEMORY ;
}
/* we let events system to the close on the socket. This avoids
* nasty interactions with waiting for talloc to close the socket . */
tevent_fd_set_close_fn ( fde , socket_tevent_fd_close_fn ) ;
socket_set_flags ( stream_socket - > sock , SOCKET_FLAG_NOCLOSE ) ;
2005-01-30 03:55:49 +03:00
2009-02-02 10:41:28 +03:00
stream_socket - > private_data = talloc_reference ( stream_socket , private_data ) ;
2005-01-30 03:55:49 +03:00
stream_socket - > ops = stream_ops ;
stream_socket - > event_ctx = event_context ;
stream_socket - > model_ops = model_ops ;
return NT_STATUS_OK ;
}
2006-03-09 20:48:41 +03:00
2011-05-12 14:33:01 +04:00
2006-03-09 20:48:41 +03:00
/*
setup a connection title
*/
void stream_connection_set_title ( struct stream_connection * conn , const char * title )
{
conn - > model_ops - > set_title ( conn - > event . ctx , title ) ;
}