2004-09-13 14:36:59 +04:00
/*
Unix SMB / CIFS implementation .
2005-06-19 11:21:18 +04:00
2004-09-13 14:36:59 +04:00
LDAP server
2005-06-19 11:21:18 +04:00
Copyright ( C ) Andrew Tridgell 2005
2004-09-13 14:36:59 +04:00
Copyright ( C ) Volker Lendecke 2004
Copyright ( C ) Stefan Metzmacher 2004
This program is free software ; you can redistribute it and / or modify
it under the terms of the GNU General Public License as published by
2007-07-10 06:07:03 +04:00
the Free Software Foundation ; either version 3 of the License , or
2004-09-13 14:36:59 +04:00
( at your option ) any later version .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
2007-07-10 06:07:03 +04:00
along with this program . If not , see < http : //www.gnu.org/licenses/>.
2004-09-13 14:36:59 +04:00
*/
# include "includes.h"
2005-02-03 14:56:03 +03:00
# include "lib/events/events.h"
2004-11-02 05:57:18 +03:00
# include "auth/auth.h"
2006-11-07 03:48:36 +03:00
# include "auth/credentials/credentials.h"
# include "librpc/gen_ndr/ndr_samr.h"
2006-08-30 15:29:34 +04:00
# include "lib/util/dlinklist.h"
2008-01-15 03:04:38 +03:00
# include "lib/util/asn1.h"
2004-11-02 09:52:59 +03:00
# include "ldap_server/ldap_server.h"
2005-06-19 11:21:18 +04:00
# include "smbd/service_task.h"
2005-01-30 03:54:57 +03:00
# include "smbd/service_stream.h"
2006-03-07 17:13:38 +03:00
# include "smbd/service.h"
2006-08-21 05:25:20 +04:00
# include "smbd/process_model.h"
2005-06-19 11:21:18 +04:00
# include "lib/tls/tls.h"
2005-07-10 05:08:10 +04:00
# include "lib/messaging/irpc.h"
2006-01-13 18:40:15 +03:00
# include "lib/ldb/include/ldb.h"
# include "lib/ldb/include/ldb_errors.h"
2006-03-07 14:07:23 +03:00
# include "system/network.h"
2006-08-17 17:37:04 +04:00
# include "lib/socket/netif.h"
2006-12-13 14:19:51 +03:00
# include "dsdb/samdb/samdb.h"
2007-09-08 16:42:09 +04:00
# include "param/param.h"
2004-09-13 14:36:59 +04:00
/*
close the socket and shutdown a server_context
*/
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
void ldapsrv_terminate_connection ( struct ldapsrv_connection * conn ,
2005-06-19 13:31:34 +04:00
const char * reason )
2004-09-13 14:36:59 +04:00
{
2005-12-08 13:23:56 +03:00
stream_terminate_connection ( conn - > connection , reason ) ;
2004-09-13 14:36:59 +04:00
}
2005-11-10 04:41:47 +03:00
/*
  Packet-layer error callback.

  Invoked by the packet subsystem when an unrecoverable error occurs
  on the connection; converts the NTSTATUS into a message string and
  tears the connection down.
*/
static void ldapsrv_error_handler(void *private, NTSTATUS status)
{
	struct ldapsrv_connection *conn;

	conn = talloc_get_type(private, struct ldapsrv_connection);
	ldapsrv_terminate_connection(conn, nt_errstr(status));
}
2005-06-19 13:31:34 +04:00
/*
process a decoded ldap message
*/
static void ldapsrv_process_message ( struct ldapsrv_connection * conn ,
struct ldap_message * msg )
2004-09-13 14:36:59 +04:00
{
2005-06-19 13:31:34 +04:00
struct ldapsrv_call * call ;
2004-09-20 16:31:07 +04:00
NTSTATUS status ;
2005-06-19 13:31:34 +04:00
DATA_BLOB blob ;
2004-09-13 14:36:59 +04:00
2005-06-19 13:31:34 +04:00
call = talloc ( conn , struct ldapsrv_call ) ;
if ( ! call ) {
ldapsrv_terminate_connection ( conn , " no memory " ) ;
return ;
2004-10-28 08:00:43 +04:00
}
2005-06-19 13:31:34 +04:00
call - > request = talloc_steal ( call , msg ) ;
call - > conn = conn ;
call - > replies = NULL ;
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
call - > send_callback = NULL ;
call - > send_private = NULL ;
2005-06-19 13:31:34 +04:00
/* make the call */
status = ldapsrv_do_call ( call ) ;
2005-06-19 11:21:18 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
talloc_free ( call ) ;
return ;
2004-10-08 16:19:08 +04:00
}
2005-06-19 13:31:34 +04:00
blob = data_blob ( NULL , 0 ) ;
2004-10-08 16:19:08 +04:00
2005-06-19 13:31:34 +04:00
if ( call - > replies = = NULL ) {
talloc_free ( call ) ;
return ;
2004-10-28 08:00:43 +04:00
}
2005-06-19 13:31:34 +04:00
/* build all the replies into a single blob */
while ( call - > replies ) {
DATA_BLOB b ;
2007-08-29 17:07:03 +04:00
bool ret ;
2004-10-08 16:19:08 +04:00
2005-06-19 13:31:34 +04:00
msg = call - > replies - > msg ;
2005-09-14 02:05:45 +04:00
if ( ! ldap_encode ( msg , & b , call ) ) {
2005-06-19 13:31:34 +04:00
DEBUG ( 0 , ( " Failed to encode ldap reply of type %d \n " , msg - > type ) ) ;
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
talloc_free ( call ) ;
return ;
2005-06-19 13:31:34 +04:00
}
2004-10-08 16:19:08 +04:00
2007-08-29 17:07:03 +04:00
ret = data_blob_append ( call , & blob , b . data , b . length ) ;
2005-09-14 02:05:45 +04:00
data_blob_free ( & b ) ;
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
talloc_set_name_const ( blob . data , " Outgoing, encoded LDAP packet " ) ;
2004-10-08 16:19:08 +04:00
2007-08-29 17:07:03 +04:00
if ( ! ret ) {
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
talloc_free ( call ) ;
return ;
2005-06-19 13:31:34 +04:00
}
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
DLIST_REMOVE ( call - > replies , call - > replies ) ;
2004-10-08 16:19:08 +04:00
}
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
packet_send_callback ( conn - > packet , blob ,
call - > send_callback , call - > send_private ) ;
2005-06-19 13:31:34 +04:00
talloc_free ( call ) ;
return ;
2004-10-08 16:19:08 +04:00
}
2005-06-19 13:31:34 +04:00
/*
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
decode / process data
2005-06-19 13:31:34 +04:00
*/
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
static NTSTATUS ldapsrv_decode ( void * private , DATA_BLOB blob )
2004-09-13 14:36:59 +04:00
{
2007-03-13 03:59:06 +03:00
NTSTATUS status ;
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
struct ldapsrv_connection * conn = talloc_get_type ( private ,
struct ldapsrv_connection ) ;
2007-05-21 10:12:06 +04:00
struct asn1_data * asn1 = asn1_init ( conn ) ;
2005-11-10 04:41:47 +03:00
struct ldap_message * msg = talloc ( conn , struct ldap_message ) ;
2004-10-10 02:00:00 +04:00
2007-05-21 16:47:18 +04:00
if ( asn1 = = NULL | | msg = = NULL ) {
2005-11-10 04:41:47 +03:00
return NT_STATUS_NO_MEMORY ;
2004-10-08 16:19:08 +04:00
}
2007-05-21 10:12:06 +04:00
if ( ! asn1_load ( asn1 , blob ) ) {
2007-05-21 16:47:18 +04:00
talloc_free ( msg ) ;
talloc_free ( asn1 ) ;
2005-11-10 04:41:47 +03:00
return NT_STATUS_NO_MEMORY ;
2004-10-08 16:19:08 +04:00
}
2007-05-21 10:12:06 +04:00
status = ldap_decode ( asn1 , msg ) ;
2007-03-13 03:59:06 +03:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
2007-05-21 10:12:06 +04:00
asn1_free ( asn1 ) ;
2007-03-13 03:59:06 +03:00
return status ;
2004-10-08 16:19:08 +04:00
}
2005-11-10 04:41:47 +03:00
data_blob_free ( & blob ) ;
ldapsrv_process_message ( conn , msg ) ;
2007-05-21 10:12:06 +04:00
asn1_free ( asn1 ) ;
2005-11-10 04:41:47 +03:00
return NT_STATUS_OK ;
2005-06-19 13:31:34 +04:00
}
2004-10-08 16:19:08 +04:00
2006-01-14 01:48:08 +03:00
/*
Idle timeout handler
*/
static void ldapsrv_conn_idle_timeout ( struct event_context * ev ,
struct timed_event * te ,
struct timeval t ,
void * private )
{
struct ldapsrv_connection * conn = talloc_get_type ( private , struct ldapsrv_connection ) ;
ldapsrv_terminate_connection ( conn , " Timeout. No requests after bind " ) ;
}
2004-09-13 14:36:59 +04:00
/*
called when a LDAP socket becomes readable
*/
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this patch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
void ldapsrv_recv ( struct stream_connection * c , uint16_t flags )
2004-09-13 14:36:59 +04:00
{
2005-06-19 13:31:34 +04:00
struct ldapsrv_connection * conn =
talloc_get_type ( c - > private , struct ldapsrv_connection ) ;
2006-01-13 03:38:35 +03:00
2006-01-14 01:48:08 +03:00
if ( conn - > limits . ite ) { /* clean initial timeout if any */
2006-01-13 03:38:35 +03:00
talloc_free ( conn - > limits . ite ) ;
conn - > limits . ite = NULL ;
}
2006-01-14 01:48:08 +03:00
if ( conn - > limits . te ) { /* clean idle timeout if any */
talloc_free ( conn - > limits . te ) ;
conn - > limits . te = NULL ;
}
2005-11-10 04:41:47 +03:00
packet_recv ( conn - > packet ) ;
2006-01-14 01:48:08 +03:00
/* set idle timeout */
conn - > limits . te = event_add_timed ( c - > event . ctx , conn ,
timeval_current_ofs ( conn - > limits . conn_idle_time , 0 ) ,
ldapsrv_conn_idle_timeout , conn ) ;
2004-09-13 14:36:59 +04:00
}
2005-11-10 04:41:47 +03:00
2004-09-13 14:36:59 +04:00
/*
called when a LDAP socket becomes writable
*/
2005-06-19 13:31:34 +04:00
static void ldapsrv_send ( struct stream_connection * c , uint16_t flags )
2004-09-13 14:36:59 +04:00
{
2005-06-19 13:31:34 +04:00
struct ldapsrv_connection * conn =
talloc_get_type ( c - > private , struct ldapsrv_connection ) ;
2005-11-10 04:41:47 +03:00
packet_queue_run ( conn - > packet ) ;
2004-09-13 14:36:59 +04:00
}
2006-01-13 03:38:35 +03:00
static void ldapsrv_conn_init_timeout ( struct event_context * ev ,
struct timed_event * te ,
struct timeval t ,
void * private )
{
struct ldapsrv_connection * conn = talloc_get_type ( private , struct ldapsrv_connection ) ;
ldapsrv_terminate_connection ( conn , " Timeout. No requests after initial connection " ) ;
}
2006-01-13 18:40:15 +03:00
static int ldapsrv_load_limits ( struct ldapsrv_connection * conn )
{
TALLOC_CTX * tmp_ctx ;
const char * attrs [ ] = { " configurationNamingContext " , NULL } ;
const char * attrs2 [ ] = { " lDAPAdminLimits " , NULL } ;
struct ldb_message_element * el ;
struct ldb_result * res = NULL ;
struct ldb_dn * basedn ;
struct ldb_dn * conf_dn ;
struct ldb_dn * policy_dn ;
int i , ret ;
/* set defaults limits in case of failure */
conn - > limits . initial_timeout = 120 ;
conn - > limits . conn_idle_time = 900 ;
conn - > limits . max_page_size = 1000 ;
conn - > limits . search_timeout = 120 ;
tmp_ctx = talloc_new ( conn ) ;
if ( tmp_ctx = = NULL ) {
return - 1 ;
}
2006-11-22 03:59:34 +03:00
basedn = ldb_dn_new ( tmp_ctx , conn - > ldb , NULL ) ;
if ( ! ldb_dn_validate ( basedn ) ) {
2006-01-13 18:40:15 +03:00
goto failed ;
}
ret = ldb_search ( conn - > ldb , basedn , LDB_SCOPE_BASE , NULL , attrs , & res ) ;
2006-12-13 14:19:51 +03:00
if ( ret ! = LDB_SUCCESS ) {
goto failed ;
}
2006-01-13 18:40:15 +03:00
talloc_steal ( tmp_ctx , res ) ;
2006-12-13 14:19:51 +03:00
if ( res - > count ! = 1 ) {
2006-01-13 18:40:15 +03:00
goto failed ;
}
2006-11-22 03:59:34 +03:00
conf_dn = ldb_msg_find_attr_as_dn ( conn - > ldb , tmp_ctx , res - > msgs [ 0 ] , " configurationNamingContext " ) ;
2006-01-13 18:40:15 +03:00
if ( conf_dn = = NULL ) {
goto failed ;
}
2006-11-22 03:59:34 +03:00
policy_dn = ldb_dn_copy ( tmp_ctx , conf_dn ) ;
ldb_dn_add_child_fmt ( policy_dn , " CN=Default Query Policy,CN=Query-Policies,CN=Directory Service,CN=Windows NT,CN=Services " ) ;
2006-01-13 18:40:15 +03:00
if ( policy_dn = = NULL ) {
goto failed ;
}
ret = ldb_search ( conn - > ldb , policy_dn , LDB_SCOPE_BASE , NULL , attrs2 , & res ) ;
2006-12-13 14:19:51 +03:00
if ( ret ! = LDB_SUCCESS ) {
goto failed ;
}
2006-01-13 18:40:15 +03:00
talloc_steal ( tmp_ctx , res ) ;
2006-12-13 14:19:51 +03:00
if ( res - > count ! = 1 ) {
2006-01-13 18:40:15 +03:00
goto failed ;
}
el = ldb_msg_find_element ( res - > msgs [ 0 ] , " lDAPAdminLimits " ) ;
if ( el = = NULL ) {
goto failed ;
}
for ( i = 0 ; i < el - > num_values ; i + + ) {
char policy_name [ 256 ] ;
int policy_value , s ;
2006-03-03 11:23:57 +03:00
s = sscanf ( ( const char * ) el - > values [ i ] . data , " %255[^=]=%d " , policy_name , & policy_value ) ;
2006-01-13 18:40:15 +03:00
if ( ret ! = 2 | | policy_value = = 0 )
continue ;
if ( strcasecmp ( " InitRecvTimeout " , policy_name ) = = 0 ) {
conn - > limits . initial_timeout = policy_value ;
continue ;
}
if ( strcasecmp ( " MaxConnIdleTime " , policy_name ) = = 0 ) {
conn - > limits . conn_idle_time = policy_value ;
continue ;
}
if ( strcasecmp ( " MaxPageSize " , policy_name ) = = 0 ) {
conn - > limits . max_page_size = policy_value ;
continue ;
}
if ( strcasecmp ( " MaxQueryDuration " , policy_name ) = = 0 ) {
conn - > limits . search_timeout = policy_value ;
continue ;
}
}
return 0 ;
failed :
DEBUG ( 0 , ( " Failed to load ldap server query policies \n " ) ) ;
talloc_free ( tmp_ctx ) ;
return - 1 ;
}
2004-09-13 14:36:59 +04:00
/*
initialise a server_context from a open socket and register a event handler
for reading from that socket
*/
2005-06-19 13:31:34 +04:00
static void ldapsrv_accept ( struct stream_connection * c )
2004-09-13 14:36:59 +04:00
{
2005-06-19 11:21:18 +04:00
struct ldapsrv_service * ldapsrv_service =
2005-06-19 13:31:34 +04:00
talloc_get_type ( c - > private , struct ldapsrv_service ) ;
struct ldapsrv_connection * conn ;
2006-01-03 03:10:15 +03:00
struct cli_credentials * server_credentials ;
2006-01-10 01:12:53 +03:00
struct socket_address * socket_address ;
2006-01-03 03:10:15 +03:00
NTSTATUS status ;
2005-06-19 15:10:15 +04:00
int port ;
2004-09-13 14:36:59 +04:00
2005-06-19 13:31:34 +04:00
conn = talloc_zero ( c , struct ldapsrv_connection ) ;
2005-09-08 15:26:05 +04:00
if ( ! conn ) {
stream_terminate_connection ( c , " ldapsrv_accept: out of memory " ) ;
return ;
}
2004-09-13 14:36:59 +04:00
2005-11-10 04:41:47 +03:00
conn - > packet = NULL ;
2005-06-19 13:31:34 +04:00
conn - > connection = c ;
2005-09-08 15:26:05 +04:00
conn - > service = ldapsrv_service ;
2006-07-25 23:20:04 +04:00
conn - > sockets . raw = c - > socket ;
2008-01-06 00:36:33 +03:00
conn - > lp_ctx = ldapsrv_service - > task - > lp_ctx ;
2006-01-03 03:10:15 +03:00
2005-06-19 13:31:34 +04:00
c - > private = conn ;
2005-06-19 11:21:18 +04:00
2006-01-10 01:12:53 +03:00
socket_address = socket_get_my_addr ( c - > socket , conn ) ;
if ( ! socket_address ) {
ldapsrv_terminate_connection ( conn , " ldapsrv_accept: failed to obtain local socket address! " ) ;
return ;
}
port = socket_address - > port ;
talloc_free ( socket_address ) ;
2005-06-19 15:10:15 +04:00
2006-05-03 00:15:47 +04:00
if ( port = = 636 ) {
2006-07-25 04:57:27 +04:00
struct socket_context * tls_socket = tls_init_server ( ldapsrv_service - > tls_params , c - > socket ,
c - > event . fde , NULL ) ;
if ( ! tls_socket ) {
2006-05-03 00:15:47 +04:00
ldapsrv_terminate_connection ( conn , " ldapsrv_accept: tls_init_server() failed " ) ;
return ;
}
2006-07-25 04:57:27 +04:00
talloc_unlink ( c , c - > socket ) ;
talloc_steal ( c , tls_socket ) ;
c - > socket = tls_socket ;
2006-07-25 23:20:04 +04:00
conn - > sockets . tls = tls_socket ;
2006-07-25 04:57:27 +04:00
2006-07-12 08:59:41 +04:00
} else if ( port = = 3268 ) /* Global catalog */ {
2007-10-07 01:42:58 +04:00
conn - > global_catalog = true ;
2005-09-08 15:26:05 +04:00
}
2005-11-10 04:41:47 +03:00
conn - > packet = packet_init ( conn ) ;
if ( conn - > packet = = NULL ) {
ldapsrv_terminate_connection ( conn , " out of memory " ) ;
2005-12-08 13:23:56 +03:00
return ;
2005-11-10 04:41:47 +03:00
}
2006-04-29 13:20:22 +04:00
2005-11-10 04:41:47 +03:00
packet_set_private ( conn - > packet , conn ) ;
2006-05-03 00:15:47 +04:00
packet_set_socket ( conn - > packet , c - > socket ) ;
2005-11-10 04:41:47 +03:00
packet_set_callback ( conn - > packet , ldapsrv_decode ) ;
r17197: This patch moves the encryption of bulk data on SASL negotiated security
contexts from the application layer into the socket layer.
This improves a number of correctness aspects, as we now allow LDAP
packets to cross multiple SASL packets. It should also make it much
easier to write async LDAP tests from windows clients, as they use SASL
by default. It is also vital to allowing OpenLDAP clients to use GSSAPI
against Samba4, as it negotiates a rather small SASL buffer size.
This patch mirrors the earlier work done to move TLS into the socket
layer.
Unusual in this pstch is the extra read callback argument I take. As
SASL is a layer on top of a socket, it is entirely possible for the
SASL layer to drain a socket dry, but for the caller not to have read
all the decrypted data. This would leave the system without an event
to restart the read (as the socket is dry).
As such, I re-invoke the read handler from a timed callback, which
should trigger on the next running of the event loop. I believe that
the TLS code does require a similar callback.
In trying to understand why this is required, imagine a SASL-encrypted
LDAP packet in the following formation:
+-----------------+---------------------+
| SASL Packet #1 | SASL Packet #2 |
----------------------------------------+
| LDAP Packet #1 | LDAP Packet #2 |
----------------------------------------+
In the old code, this was illegal, but it is perfectly standard
SASL-encrypted LDAP. Without the callback, we would read and process
the first LDAP packet, and the SASL code would have read the second SASL
packet (to decrypt enough data for the LDAP packet), and no data would
remain on the socket.
Without data on the socket, read events stop. That is why I add timed
events, until the SASL buffer is drained.
Another approach would be to add a hack to the event system, to have it
pretend there remained data to read off the network (but that is ugly).
In improving the code, to handle more real-world cases, I've been able
to remove almost all the special-cases in the testnonblock code. The
only special case is that we must use a deterministic partial packet
when calling send, rather than a random length. (1 + n/2). This is
needed because of the way the SASL and TLS code works, and the 'resend
on failure' requirements.
Andrew Bartlett
(This used to be commit 5d7c9c12cb2b39673172a357092b80cd814850b0)
2006-07-23 06:50:08 +04:00
packet_set_full_request ( conn - > packet , ldap_full_packet ) ;
2005-11-10 04:41:47 +03:00
packet_set_error_handler ( conn - > packet , ldapsrv_error_handler ) ;
packet_set_event_context ( conn - > packet , c - > event . ctx ) ;
2005-11-14 06:45:57 +03:00
packet_set_fde ( conn - > packet , c - > event . fde ) ;
packet_set_serialise ( conn - > packet ) ;
2006-04-29 13:20:22 +04:00
/* Ensure we don't get packets until the database is ready below */
packet_recv_disable ( conn - > packet ) ;
2007-12-02 19:56:09 +03:00
server_credentials = cli_credentials_init ( conn ) ;
2006-04-29 13:20:22 +04:00
if ( ! server_credentials ) {
stream_terminate_connection ( c , " Failed to init server credentials \n " ) ;
return ;
}
2007-12-03 23:25:06 +03:00
cli_credentials_set_conf ( server_credentials , conn - > lp_ctx ) ;
2007-12-14 00:46:17 +03:00
status = cli_credentials_set_machine_account ( server_credentials , conn - > lp_ctx ) ;
2006-04-29 13:20:22 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
stream_terminate_connection ( c , talloc_asprintf ( conn , " Failed to obtain server credentials, perhaps a standalone server?: %s \n " , nt_errstr ( status ) ) ) ;
return ;
}
conn - > server_credentials = server_credentials ;
2005-11-10 04:41:47 +03:00
2005-10-07 15:31:45 +04:00
/* Connections start out anonymous */
2007-12-03 23:25:06 +03:00
if ( ! NT_STATUS_IS_OK ( auth_anonymous_session_info ( conn , conn - > lp_ctx , & conn - > session_info ) ) ) {
2005-10-07 15:31:45 +04:00
ldapsrv_terminate_connection ( conn , " failed to setup anonymous session info " ) ;
2005-12-08 13:23:56 +03:00
return ;
2005-10-07 15:31:45 +04:00
}
2006-01-13 03:38:35 +03:00
if ( ! NT_STATUS_IS_OK ( ldapsrv_backend_Init ( conn ) ) ) {
ldapsrv_terminate_connection ( conn , " backend Init failed " ) ;
2005-12-08 13:23:56 +03:00
return ;
2005-10-07 15:31:45 +04:00
}
2006-01-13 18:40:15 +03:00
/* load limits from the conf partition */
ldapsrv_load_limits ( conn ) ; /* should we fail on error ? */
2005-10-07 15:31:45 +04:00
2006-01-13 03:38:35 +03:00
/* register the server */
2005-07-10 05:08:10 +04:00
irpc_add_name ( c - > msg_ctx , " ldap_server " ) ;
2006-01-13 03:38:35 +03:00
/* set connections limits */
conn - > limits . ite = event_add_timed ( c - > event . ctx , conn ,
timeval_current_ofs ( conn - > limits . initial_timeout , 0 ) ,
ldapsrv_conn_init_timeout , conn ) ;
2006-04-29 13:20:22 +04:00
packet_recv_enable ( conn - > packet ) ;
2004-09-13 14:36:59 +04:00
}
2005-01-30 03:54:57 +03:00
static const struct stream_server_ops ldap_stream_ops = {
2005-01-14 04:32:56 +03:00
. name = " ldap " ,
. accept_connection = ldapsrv_accept ,
. recv_handler = ldapsrv_recv ,
. send_handler = ldapsrv_send ,
} ;
2005-01-30 03:54:57 +03:00
/*
add a socket address to the list of events , one event per port
*/
2005-06-19 13:31:34 +04:00
static NTSTATUS add_socket ( struct event_context * event_context ,
2007-12-02 19:09:52 +03:00
struct loadparm_context * lp_ctx ,
2005-06-19 13:31:34 +04:00
const struct model_ops * model_ops ,
2005-01-30 03:54:57 +03:00
const char * address , struct ldapsrv_service * ldap_service )
2004-09-13 14:36:59 +04:00
{
2005-01-30 03:54:57 +03:00
uint16_t port = 389 ;
NTSTATUS status ;
2006-12-13 14:19:51 +03:00
struct ldb_context * ldb ;
2005-01-30 03:54:57 +03:00
2008-01-06 04:03:43 +03:00
status = stream_setup_socket ( event_context , lp_ctx ,
model_ops , & ldap_stream_ops ,
2007-12-06 18:54:34 +03:00
" ipv4 " , address , & port ,
lp_socket_options ( lp_ctx ) ,
ldap_service ) ;
2005-06-19 11:21:18 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " ldapsrv failed to bind to %s:%u - %s \n " ,
address , port , nt_errstr ( status ) ) ) ;
}
2005-01-30 03:54:57 +03:00
2005-06-19 13:31:34 +04:00
if ( tls_support ( ldap_service - > tls_params ) ) {
/* add ldaps server */
port = 636 ;
2008-01-06 04:03:43 +03:00
status = stream_setup_socket ( event_context , lp_ctx ,
model_ops , & ldap_stream_ops ,
2007-12-06 18:54:34 +03:00
" ipv4 " , address , & port ,
lp_socket_options ( lp_ctx ) ,
ldap_service ) ;
2005-06-19 13:31:34 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " ldapsrv failed to bind to %s:%u - %s \n " ,
address , port , nt_errstr ( status ) ) ) ;
}
2005-06-19 11:21:18 +04:00
}
2005-06-19 13:31:34 +04:00
2006-12-13 14:19:51 +03:00
/* Load LDAP database */
2007-12-03 17:53:28 +03:00
ldb = samdb_connect ( ldap_service , lp_ctx , system_session ( ldap_service , lp_ctx ) ) ;
2006-12-13 14:19:51 +03:00
if ( ! ldb ) {
return NT_STATUS_INTERNAL_DB_CORRUPTION ;
}
2008-01-03 13:40:24 +03:00
if ( samdb_is_gc ( ldb ) ) {
2005-10-17 15:32:20 +04:00
port = 3268 ;
2008-01-06 04:03:43 +03:00
status = stream_setup_socket ( event_context , lp_ctx ,
model_ops , & ldap_stream_ops ,
2007-12-06 18:54:34 +03:00
" ipv4 " , address , & port ,
lp_socket_options ( lp_ctx ) ,
ldap_service ) ;
2005-10-17 15:32:20 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " ldapsrv failed to bind to %s:%u - %s \n " ,
address , port , nt_errstr ( status ) ) ) ;
}
}
2005-06-19 11:21:18 +04:00
return status ;
2004-09-13 14:36:59 +04:00
}
2005-01-30 03:54:57 +03:00
/*
open the ldap server sockets
*/
2005-06-19 11:21:18 +04:00
static void ldapsrv_task_init ( struct task_server * task )
2005-01-30 03:54:57 +03:00
{
2007-11-10 07:31:26 +03:00
char * ldapi_path ;
2005-01-30 03:54:57 +03:00
struct ldapsrv_service * ldap_service ;
NTSTATUS status ;
2006-08-21 05:25:20 +04:00
const struct model_ops * model_ops ;
2004-09-13 14:36:59 +04:00
2007-12-02 19:56:09 +03:00
switch ( lp_server_role ( task - > lp_ctx ) ) {
2007-09-22 16:57:17 +04:00
case ROLE_STANDALONE :
task_server_terminate ( task , " ldap_server: no LDAP server required in standalone configuration " ) ;
return ;
case ROLE_DOMAIN_MEMBER :
task_server_terminate ( task , " ldap_server: no LDAP server required in member server configuration " ) ;
return ;
case ROLE_DOMAIN_CONTROLLER :
/* Yes, we want an LDAP server */
break ;
}
2006-03-09 20:48:41 +03:00
task_server_set_title ( task , " task[ldapsrv] " ) ;
2006-08-21 05:25:20 +04:00
/* run the ldap server as a single process */
model_ops = process_model_byname ( " single " ) ;
if ( ! model_ops ) goto failed ;
2005-06-19 11:21:18 +04:00
ldap_service = talloc_zero ( task , struct ldapsrv_service ) ;
if ( ldap_service = = NULL ) goto failed ;
2005-01-30 03:54:57 +03:00
2008-01-06 00:36:33 +03:00
ldap_service - > task = task ;
2007-12-03 02:28:22 +03:00
ldap_service - > tls_params = tls_initialise ( ldap_service , task - > lp_ctx ) ;
2005-06-19 11:21:18 +04:00
if ( ldap_service - > tls_params = = NULL ) goto failed ;
2005-01-30 03:54:57 +03:00
2007-12-02 19:09:52 +03:00
if ( lp_interfaces ( task - > lp_ctx ) & & lp_bind_interfaces_only ( task - > lp_ctx ) ) {
2007-12-12 00:23:14 +03:00
struct interface * ifaces ;
int num_interfaces ;
2005-01-30 03:54:57 +03:00
int i ;
2007-12-12 00:23:20 +03:00
load_interfaces ( task , lp_interfaces ( task - > lp_ctx ) , & ifaces ) ;
2007-12-12 00:23:14 +03:00
num_interfaces = iface_count ( ifaces ) ;
2005-01-30 03:54:57 +03:00
/* We have been given an interfaces line, and been
told to only bind to those interfaces . Create a
socket per interface and bind to only these .
*/
for ( i = 0 ; i < num_interfaces ; i + + ) {
2007-12-12 00:23:14 +03:00
const char * address = iface_n_ip ( ifaces , i ) ;
2007-12-02 19:09:52 +03:00
status = add_socket ( task - > event_ctx , task - > lp_ctx , model_ops , address , ldap_service ) ;
2005-06-19 11:21:18 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) goto failed ;
2005-01-30 03:54:57 +03:00
}
} else {
2007-12-02 19:09:52 +03:00
status = add_socket ( task - > event_ctx , task - > lp_ctx , model_ops ,
lp_socket_address ( task - > lp_ctx ) , ldap_service ) ;
2005-06-19 11:21:18 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) goto failed ;
2005-01-30 03:54:57 +03:00
}
2007-12-02 19:09:52 +03:00
ldapi_path = private_path ( ldap_service , task - > lp_ctx , " ldapi " ) ;
2007-11-10 07:31:26 +03:00
if ( ! ldapi_path ) {
goto failed ;
}
2008-01-06 04:03:43 +03:00
status = stream_setup_socket ( task - > event_ctx , task - > lp_ctx ,
model_ops , & ldap_stream_ops ,
2007-12-06 18:54:34 +03:00
" unix " , ldapi_path , NULL ,
lp_socket_options ( task - > lp_ctx ) ,
ldap_service ) ;
2007-11-10 07:31:26 +03:00
talloc_free ( ldapi_path ) ;
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " ldapsrv failed to bind to %s - %s \n " ,
ldapi_path , nt_errstr ( status ) ) ) ;
}
2005-06-19 11:21:18 +04:00
return ;
failed :
2005-06-26 03:53:14 +04:00
task_server_terminate ( task , " Failed to startup ldap server task " ) ;
2005-06-19 11:21:18 +04:00
}
/*
called on startup of the web server service It ' s job is to start
listening on all configured sockets
*/
static NTSTATUS ldapsrv_init ( struct event_context * event_context ,
2007-12-03 02:28:07 +03:00
struct loadparm_context * lp_ctx ,
2005-06-19 11:21:18 +04:00
const struct model_ops * model_ops )
{
2008-02-04 09:48:51 +03:00
return task_server_startup ( event_context , lp_ctx , " ldap " , model_ops ,
2008-01-06 04:03:43 +03:00
ldapsrv_task_init ) ;
2004-09-13 14:36:59 +04:00
}
2005-01-30 03:54:57 +03:00
2004-09-13 14:36:59 +04:00
NTSTATUS server_service_ldap_init ( void )
{
2005-01-30 03:54:57 +03:00
return register_server_service ( " ldap " , ldapsrv_init ) ;
2004-09-13 14:36:59 +04:00
}