Mirror of https://github.com/samba-team/samba.git (synced 2024-12-23) —
samba-mirror/source4/ldap_server/ldap_server.c (C, 1304 lines, 33 KiB).
/*
   Unix SMB/CIFS implementation.

   LDAP server

   Copyright (C) Andrew Tridgell 2005
   Copyright (C) Volker Lendecke 2004
   Copyright (C) Stefan Metzmacher 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/network.h"
#include "lib/events/events.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "librpc/gen_ndr/ndr_samr.h"
#include "../lib/util/dlinklist.h"
#include "../lib/util/asn1.h"
#include "ldap_server/ldap_server.h"
#include "smbd/service_task.h"
/* History: r5102 (2005-01-30) simplified the smbd top-level server model:
 * removed the old server/service structures, added service_stream.[ch] and
 * server registration, and cleaned up smbd command-line handling. */
#include "smbd/service_stream.h"
#include "smbd/service.h"
#include "smbd/process_model.h"
#include "lib/tls/tls.h"
#include "lib/messaging/irpc.h"
#include <ldb.h>
#include <ldb_errors.h>
#include "libcli/ldap/ldap_proto.h"
#include "system/network.h"
#include "lib/socket/netif.h"
#include "dsdb/samdb/samdb.h"
#include "param/param.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../libcli/util/tstream.h"
#include "libds/common/roles.h"
static void ldapsrv_terminate_connection_done(struct tevent_req *subreq);
/*
close the socket and shutdown a server_context
*/
static void ldapsrv_terminate_connection(struct ldapsrv_connection *conn,
const char *reason)
{
struct tevent_req *subreq;
if (conn->limits.reason) {
return;
}
DLIST_REMOVE(conn->service->connections, conn);
conn->limits.endtime = timeval_current_ofs(0, 500);
tevent_queue_stop(conn->sockets.send_queue);
TALLOC_FREE(conn->sockets.read_req);
if (conn->active_call) {
tevent_req_cancel(conn->active_call);
conn->active_call = NULL;
}
conn->limits.reason = talloc_strdup(conn, reason);
if (conn->limits.reason == NULL) {
TALLOC_FREE(conn->sockets.tls);
TALLOC_FREE(conn->sockets.sasl);
TALLOC_FREE(conn->sockets.raw);
stream_terminate_connection(conn->connection, reason);
return;
}
subreq = tstream_disconnect_send(conn,
conn->connection->event.ctx,
conn->sockets.active);
if (subreq == NULL) {
TALLOC_FREE(conn->sockets.tls);
TALLOC_FREE(conn->sockets.sasl);
TALLOC_FREE(conn->sockets.raw);
stream_terminate_connection(conn->connection, reason);
return;
}
tevent_req_set_endtime(subreq,
conn->connection->event.ctx,
conn->limits.endtime);
tevent_req_set_callback(subreq, ldapsrv_terminate_connection_done, conn);
}
static void ldapsrv_terminate_connection_done(struct tevent_req *subreq)
{
struct ldapsrv_connection *conn =
tevent_req_callback_data(subreq,
struct ldapsrv_connection);
int sys_errno;
bool ok;
tstream_disconnect_recv(subreq, &sys_errno);
TALLOC_FREE(subreq);
if (conn->sockets.active == conn->sockets.raw) {
TALLOC_FREE(conn->sockets.tls);
TALLOC_FREE(conn->sockets.sasl);
TALLOC_FREE(conn->sockets.raw);
stream_terminate_connection(conn->connection,
conn->limits.reason);
return;
}
TALLOC_FREE(conn->sockets.tls);
TALLOC_FREE(conn->sockets.sasl);
conn->sockets.active = conn->sockets.raw;
subreq = tstream_disconnect_send(conn,
conn->connection->event.ctx,
conn->sockets.active);
if (subreq == NULL) {
TALLOC_FREE(conn->sockets.raw);
stream_terminate_connection(conn->connection,
conn->limits.reason);
return;
}
ok = tevent_req_set_endtime(subreq,
conn->connection->event.ctx,
conn->limits.endtime);
if (!ok) {
TALLOC_FREE(conn->sockets.raw);
stream_terminate_connection(conn->connection,
conn->limits.reason);
return;
}
tevent_req_set_callback(subreq, ldapsrv_terminate_connection_done, conn);
}
/*
called when a LDAP socket becomes readable
*/
/* History: r17197 (2006-07-23) moved SASL bulk-data encryption from the
 * application layer into the socket layer, mirroring the earlier TLS move,
 * so LDAP packets may span multiple SASL packets. */
void ldapsrv_recv(struct stream_connection *c, uint16_t flags)
{
smb_panic(__location__);
}
/*
called when a LDAP socket becomes writable
*/
static void ldapsrv_send(struct stream_connection *c, uint16_t flags)
{
smb_panic(__location__);
}
static int ldapsrv_load_limits(struct ldapsrv_connection *conn)
{
TALLOC_CTX *tmp_ctx;
const char *attrs[] = { "configurationNamingContext", NULL };
const char *attrs2[] = { "lDAPAdminLimits", NULL };
struct ldb_message_element *el;
struct ldb_result *res = NULL;
struct ldb_dn *basedn;
struct ldb_dn *conf_dn;
struct ldb_dn *policy_dn;
unsigned int i;
int ret;
/* set defaults limits in case of failure */
conn->limits.initial_timeout = 120;
conn->limits.conn_idle_time = 900;
conn->limits.max_page_size = 1000;
conn->limits.max_notifications = 5;
conn->limits.search_timeout = 120;
tmp_ctx = talloc_new(conn);
if (tmp_ctx == NULL) {
return -1;
}
basedn = ldb_dn_new(tmp_ctx, conn->ldb, NULL);
if (basedn == NULL) {
goto failed;
}
ret = ldb_search(conn->ldb, tmp_ctx, &res, basedn, LDB_SCOPE_BASE, attrs, NULL);
if (ret != LDB_SUCCESS) {
goto failed;
}
if (res->count != 1) {
goto failed;
}
conf_dn = ldb_msg_find_attr_as_dn(conn->ldb, tmp_ctx, res->msgs[0], "configurationNamingContext");
if (conf_dn == NULL) {
goto failed;
}
policy_dn = ldb_dn_copy(tmp_ctx, conf_dn);
ldb_dn_add_child_fmt(policy_dn, "CN=Default Query Policy,CN=Query-Policies,CN=Directory Service,CN=Windows NT,CN=Services");
if (policy_dn == NULL) {
goto failed;
}
ret = ldb_search(conn->ldb, tmp_ctx, &res, policy_dn, LDB_SCOPE_BASE, attrs2, NULL);
if (ret != LDB_SUCCESS) {
goto failed;
}
if (res->count != 1) {
goto failed;
}
el = ldb_msg_find_element(res->msgs[0], "lDAPAdminLimits");
if (el == NULL) {
goto failed;
}
for (i = 0; i < el->num_values; i++) {
char policy_name[256];
int policy_value, s;
s = sscanf((const char *)el->values[i].data, "%255[^=]=%d", policy_name, &policy_value);
if (s != 2 || policy_value == 0)
continue;
if (strcasecmp("InitRecvTimeout", policy_name) == 0) {
conn->limits.initial_timeout = policy_value;
continue;
}
if (strcasecmp("MaxConnIdleTime", policy_name) == 0) {
conn->limits.conn_idle_time = policy_value;
continue;
}
if (strcasecmp("MaxPageSize", policy_name) == 0) {
conn->limits.max_page_size = policy_value;
continue;
}
if (strcasecmp("MaxNotificationPerConn", policy_name) == 0) {
conn->limits.max_notifications = policy_value;
continue;
}
if (strcasecmp("MaxQueryDuration", policy_name) == 0) {
conn->limits.search_timeout = policy_value;
continue;
}
}
return 0;
failed:
DEBUG(0, ("Failed to load ldap server query policies\n"));
talloc_free(tmp_ctx);
return -1;
}
static int ldapsrv_call_destructor(struct ldapsrv_call *call)
{
if (call->conn == NULL) {
return 0;
}
DLIST_REMOVE(call->conn->pending_calls, call);
call->conn = NULL;
return 0;
}
static struct tevent_req *ldapsrv_process_call_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
struct tevent_queue *call_queue,
struct ldapsrv_call *call);
static NTSTATUS ldapsrv_process_call_recv(struct tevent_req *req);
static bool ldapsrv_call_read_next(struct ldapsrv_connection *conn);
static void ldapsrv_accept_tls_done(struct tevent_req *subreq);
/*
initialise a server_context from a open socket and register a event handler
for reading from that socket
*/
static void ldapsrv_accept(struct stream_connection *c,
struct auth_session_info *session_info,
bool is_privileged)
{
struct ldapsrv_service *ldapsrv_service =
talloc_get_type(c->private_data, struct ldapsrv_service);
struct ldapsrv_connection *conn;
struct cli_credentials *server_credentials;
struct socket_address *socket_address;
NTSTATUS status;
int port;
int ret;
struct tevent_req *subreq;
struct timeval endtime;
char *errstring = NULL;
conn = talloc_zero(c, struct ldapsrv_connection);
if (!conn) {
stream_terminate_connection(c, "ldapsrv_accept: out of memory");
return;
}
conn->is_privileged = is_privileged;
conn->sockets.send_queue = tevent_queue_create(conn, "ldapsev send queue");
if (conn->sockets.send_queue == NULL) {
stream_terminate_connection(c,
"ldapsrv_accept: tevent_queue_create failed");
return;
}
TALLOC_FREE(c->event.fde);
ret = tstream_bsd_existing_socket(conn,
socket_get_fd(c->socket),
&conn->sockets.raw);
if (ret == -1) {
stream_terminate_connection(c,
"ldapsrv_accept: out of memory");
return;
}
socket_set_flags(c->socket, SOCKET_FLAG_NOCLOSE);
conn->connection = c;
conn->service = ldapsrv_service;
conn->lp_ctx = ldapsrv_service->task->lp_ctx;
c->private_data = conn;
socket_address = socket_get_my_addr(c->socket, conn);
if (!socket_address) {
ldapsrv_terminate_connection(conn, "ldapsrv_accept: failed to obtain local socket address!");
return;
}
port = socket_address->port;
talloc_free(socket_address);
if (port == 3268 || port == 3269) /* Global catalog */ {
conn->global_catalog = true;
}
server_credentials = cli_credentials_init(conn);
if (!server_credentials) {
stream_terminate_connection(c, "Failed to init server credentials\n");
return;
}
2009-05-29 11:42:31 +04:00
cli_credentials_set_conf(server_credentials, conn->lp_ctx);
status = cli_credentials_set_machine_account(server_credentials, conn->lp_ctx);
if (!NT_STATUS_IS_OK(status)) {
stream_terminate_connection(c, talloc_asprintf(conn, "Failed to obtain server credentials, perhaps a standalone server?: %s\n", nt_errstr(status)));
return;
}
conn->server_credentials = server_credentials;
conn->session_info = session_info;
conn->sockets.active = conn->sockets.raw;
if (conn->is_privileged) {
conn->require_strong_auth = LDAP_SERVER_REQUIRE_STRONG_AUTH_NO;
} else {
conn->require_strong_auth = lpcfg_ldap_server_require_strong_auth(conn->lp_ctx);
}
ret = ldapsrv_backend_Init(conn, &errstring);
if (ret != LDB_SUCCESS) {
char *reason = talloc_asprintf(conn,
"LDB backend for LDAP Init "
"failed: %s: %s",
errstring, ldb_strerror(ret));
ldapsrv_terminate_connection(conn, reason);
return;
}
/* load limits from the conf partition */
ldapsrv_load_limits(conn); /* should we fail on error ? */
/* register the server */
irpc_add_name(c->msg_ctx, "ldap_server");
DLIST_ADD_END(ldapsrv_service->connections, conn);
if (port != 636 && port != 3269) {
ldapsrv_call_read_next(conn);
return;
}
endtime = timeval_current_ofs(conn->limits.conn_idle_time, 0);
subreq = tstream_tls_accept_send(conn,
conn->connection->event.ctx,
conn->sockets.raw,
conn->service->tls_params);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn, "ldapsrv_accept: "
"no memory for tstream_tls_accept_send");
return;
}
tevent_req_set_endtime(subreq,
conn->connection->event.ctx,
endtime);
tevent_req_set_callback(subreq, ldapsrv_accept_tls_done, conn);
}
static void ldapsrv_accept_tls_done(struct tevent_req *subreq)
{
struct ldapsrv_connection *conn =
tevent_req_callback_data(subreq,
struct ldapsrv_connection);
int ret;
int sys_errno;
ret = tstream_tls_accept_recv(subreq, &sys_errno,
conn, &conn->sockets.tls);
TALLOC_FREE(subreq);
if (ret == -1) {
const char *reason;
reason = talloc_asprintf(conn, "ldapsrv_accept_tls_loop: "
"tstream_tls_accept_recv() - %d:%s",
sys_errno, strerror(sys_errno));
if (!reason) {
reason = "ldapsrv_accept_tls_loop: "
"tstream_tls_accept_recv() - failed";
}
ldapsrv_terminate_connection(conn, reason);
return;
}
conn->sockets.active = conn->sockets.tls;
ldapsrv_call_read_next(conn);
}
static void ldapsrv_call_read_done(struct tevent_req *subreq);
static bool ldapsrv_call_read_next(struct ldapsrv_connection *conn)
{
struct tevent_req *subreq;
if (conn->pending_calls != NULL) {
conn->limits.endtime = timeval_zero();
ldapsrv_notification_retry_setup(conn->service, false);
} else if (timeval_is_zero(&conn->limits.endtime)) {
conn->limits.endtime =
timeval_current_ofs(conn->limits.initial_timeout, 0);
} else {
conn->limits.endtime =
timeval_current_ofs(conn->limits.conn_idle_time, 0);
}
if (conn->sockets.read_req != NULL) {
return true;
}
/*
* The minimum size of a LDAP pdu is 7 bytes
*
* dumpasn1 -hh ldap-unbind-min.dat
*
* <30 05 02 01 09 42 00>
* 0 5: SEQUENCE {
* <02 01 09>
* 2 1: INTEGER 9
* <42 00>
* 5 0: [APPLICATION 2]
* : Error: Object has zero length.
* : }
*
* dumpasn1 -hh ldap-unbind-windows.dat
*
* <30 84 00 00 00 05 02 01 09 42 00>
* 0 5: SEQUENCE {
* <02 01 09>
* 6 1: INTEGER 9
* <42 00>
* 9 0: [APPLICATION 2]
* : Error: Object has zero length.
* : }
*
* This means using an initial read size
* of 7 is ok.
*/
subreq = tstream_read_pdu_blob_send(conn,
conn->connection->event.ctx,
conn->sockets.active,
7, /* initial_read_size */
ldap_full_packet,
conn);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn, "ldapsrv_call_read_next: "
"no memory for tstream_read_pdu_blob_send");
return false;
}
if (!timeval_is_zero(&conn->limits.endtime)) {
bool ok;
ok = tevent_req_set_endtime(subreq,
conn->connection->event.ctx,
conn->limits.endtime);
if (!ok) {
ldapsrv_terminate_connection(
conn,
"ldapsrv_call_read_next: "
"no memory for tevent_req_set_endtime");
return false;
}
}
tevent_req_set_callback(subreq, ldapsrv_call_read_done, conn);
conn->sockets.read_req = subreq;
return true;
}
static void ldapsrv_call_process_done(struct tevent_req *subreq);
static void ldapsrv_call_read_done(struct tevent_req *subreq)
{
struct ldapsrv_connection *conn =
tevent_req_callback_data(subreq,
struct ldapsrv_connection);
NTSTATUS status;
struct ldapsrv_call *call;
struct asn1_data *asn1;
DATA_BLOB blob;
conn->sockets.read_req = NULL;
call = talloc_zero(conn, struct ldapsrv_call);
if (!call) {
ldapsrv_terminate_connection(conn, "no memory");
return;
}
talloc_set_destructor(call, ldapsrv_call_destructor);
call->conn = conn;
status = tstream_read_pdu_blob_recv(subreq,
call,
&blob);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
const char *reason;
reason = talloc_asprintf(call, "ldapsrv_call_loop: "
"tstream_read_pdu_blob_recv() - %s",
nt_errstr(status));
if (!reason) {
reason = nt_errstr(status);
}
ldapsrv_terminate_connection(conn, reason);
return;
}
asn1 = asn1_init(call);
if (asn1 == NULL) {
ldapsrv_terminate_connection(conn, "no memory");
return;
}
call->request = talloc(call, struct ldap_message);
if (call->request == NULL) {
ldapsrv_terminate_connection(conn, "no memory");
return;
}
if (!asn1_load(asn1, blob)) {
ldapsrv_terminate_connection(conn, "asn1_load failed");
return;
}
status = ldap_decode(asn1, samba_ldap_control_handlers(),
call->request);
if (!NT_STATUS_IS_OK(status)) {
ldapsrv_terminate_connection(conn, nt_errstr(status));
return;
}
data_blob_free(&blob);
/* queue the call in the global queue */
subreq = ldapsrv_process_call_send(call,
conn->connection->event.ctx,
conn->service->call_queue,
call);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn, "ldapsrv_process_call_send failed");
return;
}
tevent_req_set_callback(subreq, ldapsrv_call_process_done, call);
conn->active_call = subreq;
}
static void ldapsrv_call_wait_done(struct tevent_req *subreq);
static void ldapsrv_call_writev_start(struct ldapsrv_call *call);
static void ldapsrv_call_writev_done(struct tevent_req *subreq);
static void ldapsrv_call_process_done(struct tevent_req *subreq)
{
struct ldapsrv_call *call =
tevent_req_callback_data(subreq,
struct ldapsrv_call);
struct ldapsrv_connection *conn = call->conn;
NTSTATUS status;
conn->active_call = NULL;
status = ldapsrv_process_call_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
ldapsrv_terminate_connection(conn, nt_errstr(status));
return;
}
if (call->wait_send != NULL) {
subreq = call->wait_send(call,
conn->connection->event.ctx,
call->wait_private);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn,
"ldapsrv_call_process_done: "
"call->wait_send - no memory");
return;
}
tevent_req_set_callback(subreq,
ldapsrv_call_wait_done,
call);
conn->active_call = subreq;
return;
}
ldapsrv_call_writev_start(call);
}
static void ldapsrv_call_wait_done(struct tevent_req *subreq)
{
struct ldapsrv_call *call =
tevent_req_callback_data(subreq,
struct ldapsrv_call);
struct ldapsrv_connection *conn = call->conn;
NTSTATUS status;
conn->active_call = NULL;
status = call->wait_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
const char *reason;
reason = talloc_asprintf(call, "ldapsrv_call_wait_done: "
"call->wait_recv() - %s",
nt_errstr(status));
if (reason == NULL) {
reason = nt_errstr(status);
}
ldapsrv_terminate_connection(conn, reason);
return;
}
ldapsrv_call_writev_start(call);
}
static void ldapsrv_call_writev_start(struct ldapsrv_call *call)
{
struct ldapsrv_connection *conn = call->conn;
DATA_BLOB blob = data_blob_null;
struct tevent_req *subreq = NULL;
/* build all the replies into a single blob */
while (call->replies) {
DATA_BLOB b;
bool ret;
if (!ldap_encode(call->replies->msg, samba_ldap_control_handlers(), &b, call)) {
DEBUG(0,("Failed to encode ldap reply of type %d\n",
call->replies->msg->type));
ldapsrv_terminate_connection(conn, "ldap_encode failed");
return;
}
ret = data_blob_append(call, &blob, b.data, b.length);
data_blob_free(&b);
if (!ret) {
ldapsrv_terminate_connection(conn, "data_blob_append failed");
return;
}
talloc_set_name_const(blob.data, "Outgoing, encoded LDAP packet");
DLIST_REMOVE(call->replies, call->replies);
}
if (blob.length == 0) {
if (!call->notification.busy) {
TALLOC_FREE(call);
}
ldapsrv_call_read_next(conn);
return;
}
call->out_iov.iov_base = blob.data;
call->out_iov.iov_len = blob.length;
subreq = tstream_writev_queue_send(call,
conn->connection->event.ctx,
conn->sockets.active,
conn->sockets.send_queue,
&call->out_iov, 1);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn, "stream_writev_queue_send failed");
return;
}
tevent_req_set_callback(subreq, ldapsrv_call_writev_done, call);
}
static void ldapsrv_call_postprocess_done(struct tevent_req *subreq);
static void ldapsrv_call_writev_done(struct tevent_req *subreq)
{
struct ldapsrv_call *call =
tevent_req_callback_data(subreq,
struct ldapsrv_call);
struct ldapsrv_connection *conn = call->conn;
int sys_errno;
int rc;
rc = tstream_writev_queue_recv(subreq, &sys_errno);
TALLOC_FREE(subreq);
if (rc == -1) {
const char *reason;
reason = talloc_asprintf(call, "ldapsrv_call_writev_done: "
"tstream_writev_queue_recv() - %d:%s",
sys_errno, strerror(sys_errno));
if (reason == NULL) {
reason = "ldapsrv_call_writev_done: "
"tstream_writev_queue_recv() failed";
}
ldapsrv_terminate_connection(conn, reason);
return;
}
if (call->postprocess_send) {
subreq = call->postprocess_send(call,
conn->connection->event.ctx,
call->postprocess_private);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn, "ldapsrv_call_writev_done: "
"call->postprocess_send - no memory");
return;
}
tevent_req_set_callback(subreq,
ldapsrv_call_postprocess_done,
call);
return;
}
if (!call->notification.busy) {
TALLOC_FREE(call);
}
ldapsrv_call_read_next(conn);
}
static void ldapsrv_call_postprocess_done(struct tevent_req *subreq)
{
struct ldapsrv_call *call =
tevent_req_callback_data(subreq,
struct ldapsrv_call);
struct ldapsrv_connection *conn = call->conn;
NTSTATUS status;
status = call->postprocess_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
const char *reason;
reason = talloc_asprintf(call, "ldapsrv_call_postprocess_done: "
"call->postprocess_recv() - %s",
nt_errstr(status));
if (reason == NULL) {
reason = nt_errstr(status);
}
ldapsrv_terminate_connection(conn, reason);
return;
}
TALLOC_FREE(call);
ldapsrv_call_read_next(conn);
}
static void ldapsrv_notification_retry_done(struct tevent_req *subreq);
void ldapsrv_notification_retry_setup(struct ldapsrv_service *service, bool force)
{
struct ldapsrv_connection *conn = NULL;
struct timeval retry;
size_t num_pending = 0;
size_t num_active = 0;
if (force) {
TALLOC_FREE(service->notification.retry);
service->notification.generation += 1;
}
if (service->notification.retry != NULL) {
return;
}
for (conn = service->connections; conn != NULL; conn = conn->next) {
if (conn->pending_calls == NULL) {
continue;
}
num_pending += 1;
if (conn->pending_calls->notification.generation !=
service->notification.generation)
{
num_active += 1;
}
}
if (num_pending == 0) {
return;
}
if (num_active != 0) {
retry = timeval_current_ofs(0, 100);
} else {
retry = timeval_current_ofs(5, 0);
}
service->notification.retry = tevent_wakeup_send(service,
service->task->event_ctx,
retry);
if (service->notification.retry == NULL) {
/* retry later */
return;
}
tevent_req_set_callback(service->notification.retry,
ldapsrv_notification_retry_done,
service);
}
static void ldapsrv_notification_retry_done(struct tevent_req *subreq)
{
struct ldapsrv_service *service =
tevent_req_callback_data(subreq,
struct ldapsrv_service);
struct ldapsrv_connection *conn = NULL;
struct ldapsrv_connection *conn_next = NULL;
bool ok;
service->notification.retry = NULL;
ok = tevent_wakeup_recv(subreq);
TALLOC_FREE(subreq);
if (!ok) {
/* ignore */
}
for (conn = service->connections; conn != NULL; conn = conn_next) {
struct ldapsrv_call *call = conn->pending_calls;
conn_next = conn->next;
if (conn->pending_calls == NULL) {
continue;
}
if (conn->active_call != NULL) {
continue;
}
DLIST_DEMOTE(conn->pending_calls, call);
call->notification.generation =
service->notification.generation;
/* queue the call in the global queue */
subreq = ldapsrv_process_call_send(call,
conn->connection->event.ctx,
conn->service->call_queue,
call);
if (subreq == NULL) {
ldapsrv_terminate_connection(conn,
"ldapsrv_process_call_send failed");
continue;
}
tevent_req_set_callback(subreq, ldapsrv_call_process_done, call);
conn->active_call = subreq;
}
ldapsrv_notification_retry_setup(service, false);
}
struct ldapsrv_process_call_state {
struct ldapsrv_call *call;
};
static void ldapsrv_process_call_trigger(struct tevent_req *req,
void *private_data);
static struct tevent_req *ldapsrv_process_call_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
struct tevent_queue *call_queue,
struct ldapsrv_call *call)
{
struct tevent_req *req;
struct ldapsrv_process_call_state *state;
bool ok;
req = tevent_req_create(mem_ctx, &state,
struct ldapsrv_process_call_state);
if (req == NULL) {
return req;
}
state->call = call;
ok = tevent_queue_add(call_queue, ev, req,
ldapsrv_process_call_trigger, NULL);
if (!ok) {
tevent_req_oom(req);
return tevent_req_post(req, ev);
}
return req;
}
static void ldapsrv_process_call_trigger(struct tevent_req *req,
void *private_data)
{
struct ldapsrv_process_call_state *state =
tevent_req_data(req,
struct ldapsrv_process_call_state);
NTSTATUS status;
/* make the call */
status = ldapsrv_do_call(state->call);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
return;
}
tevent_req_done(req);
}
static NTSTATUS ldapsrv_process_call_recv(struct tevent_req *req)
{
NTSTATUS status;
if (tevent_req_is_nterror(req, &status)) {
tevent_req_received(req);
return status;
}
tevent_req_received(req);
return NT_STATUS_OK;
}
static void ldapsrv_accept_nonpriv(struct stream_connection *c)
{
struct ldapsrv_service *ldapsrv_service = talloc_get_type_abort(
c->private_data, struct ldapsrv_service);
struct auth_session_info *session_info;
NTSTATUS status;
status = auth_anonymous_session_info(
c, ldapsrv_service->task->lp_ctx, &session_info);
if (!NT_STATUS_IS_OK(status)) {
stream_terminate_connection(c, "failed to setup anonymous "
"session info");
return;
}
ldapsrv_accept(c, session_info, false);
}
static const struct stream_server_ops ldap_stream_nonpriv_ops = {
.name = "ldap",
.accept_connection = ldapsrv_accept_nonpriv,
.recv_handler = ldapsrv_recv,
.send_handler = ldapsrv_send,
};
/* The feature removed behind an #ifdef until we can do it properly
* with an EXTERNAL bind. */
#define WITH_LDAPI_PRIV_SOCKET
#ifdef WITH_LDAPI_PRIV_SOCKET
static void ldapsrv_accept_priv(struct stream_connection *c)
{
struct ldapsrv_service *ldapsrv_service = talloc_get_type_abort(
c->private_data, struct ldapsrv_service);
struct auth_session_info *session_info;
session_info = system_session(ldapsrv_service->task->lp_ctx);
if (!session_info) {
stream_terminate_connection(c, "failed to setup system "
"session info");
return;
}
ldapsrv_accept(c, session_info, true);
}
static const struct stream_server_ops ldap_stream_priv_ops = {
.name = "ldap",
.accept_connection = ldapsrv_accept_priv,
.recv_handler = ldapsrv_recv,
.send_handler = ldapsrv_send,
};
#endif
/* History: r5102 (2005-01-30) simplified the smbd top-level server model
 * (see the matching note near the top of this file). */
/*
add a socket address to the list of events, one event per port
*/
static NTSTATUS add_socket(struct task_server *task,
struct loadparm_context *lp_ctx,
const struct model_ops *model_ops,
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
const char *address, struct ldapsrv_service *ldap_service)
{
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
uint16_t port = 389;
NTSTATUS status;
struct ldb_context *ldb;
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
status = stream_setup_socket(task, task->event_ctx, lp_ctx,
model_ops, &ldap_stream_nonpriv_ops,
"ip", address, &port,
lpcfg_socket_options(lp_ctx),
ldap_service, task->process_context);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed to bind to %s:%u - %s\n",
address, port, nt_errstr(status)));
return status;
}
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
if (tstream_tls_params_enabled(ldap_service->tls_params)) {
/* add ldaps server */
port = 636;
status = stream_setup_socket(task, task->event_ctx, lp_ctx,
model_ops,
&ldap_stream_nonpriv_ops,
"ip", address, &port,
lpcfg_socket_options(lp_ctx),
ldap_service,
task->process_context);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed to bind to %s:%u - %s\n",
address, port, nt_errstr(status)));
return status;
}
}
/* Load LDAP database, but only to read our settings */
ldb = samdb_connect(ldap_service,
ldap_service->task->event_ctx,
lp_ctx,
system_session(lp_ctx),
NULL,
0);
if (!ldb) {
return NT_STATUS_INTERNAL_DB_CORRUPTION;
}
2009-05-29 11:42:31 +04:00
if (samdb_is_gc(ldb)) {
port = 3268;
status = stream_setup_socket(task, task->event_ctx, lp_ctx,
model_ops,
&ldap_stream_nonpriv_ops,
"ip", address, &port,
lpcfg_socket_options(lp_ctx),
ldap_service,
task->process_context);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed to bind to %s:%u - %s\n",
address, port, nt_errstr(status)));
return status;
}
if (tstream_tls_params_enabled(ldap_service->tls_params)) {
/* add ldaps server for the global catalog */
port = 3269;
status = stream_setup_socket(task, task->event_ctx, lp_ctx,
model_ops,
&ldap_stream_nonpriv_ops,
"ip", address, &port,
lpcfg_socket_options(lp_ctx),
ldap_service,
task->process_context);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed to bind to %s:%u - %s\n",
address, port, nt_errstr(status)));
return status;
}
}
}
/* And once we are bound, free the temporary ldb, it will
* connect again on each incoming LDAP connection */
talloc_unlink(ldap_service, ldb);
return NT_STATUS_OK;
}
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
/*
open the ldap server sockets
*/
static NTSTATUS ldapsrv_task_init(struct task_server *task)
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
{
char *ldapi_path;
#ifdef WITH_LDAPI_PRIV_SOCKET
char *priv_dir;
#endif
const char *dns_host_name;
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
struct ldapsrv_service *ldap_service;
NTSTATUS status;
switch (lpcfg_server_role(task->lp_ctx)) {
case ROLE_STANDALONE:
task_server_terminate(task, "ldap_server: no LDAP server required in standalone configuration",
false);
return NT_STATUS_INVALID_DOMAIN_ROLE;
case ROLE_DOMAIN_MEMBER:
task_server_terminate(task, "ldap_server: no LDAP server required in member server configuration",
false);
return NT_STATUS_INVALID_DOMAIN_ROLE;
case ROLE_ACTIVE_DIRECTORY_DC:
/* Yes, we want an LDAP server */
break;
}
task_server_set_title(task, "task[ldapsrv]");
ldap_service = talloc_zero(task, struct ldapsrv_service);
if (ldap_service == NULL) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
ldap_service->task = task;
dns_host_name = talloc_asprintf(ldap_service, "%s.%s",
lpcfg_netbios_name(task->lp_ctx),
lpcfg_dnsdomain(task->lp_ctx));
if (dns_host_name == NULL) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
status = tstream_tls_params_server(ldap_service,
dns_host_name,
lpcfg_tls_enabled(task->lp_ctx),
lpcfg_tls_keyfile(ldap_service, task->lp_ctx),
lpcfg_tls_certfile(ldap_service, task->lp_ctx),
lpcfg_tls_cafile(ldap_service, task->lp_ctx),
lpcfg_tls_crlfile(ldap_service, task->lp_ctx),
lpcfg_tls_dhpfile(ldap_service, task->lp_ctx),
lpcfg_tls_priority(task->lp_ctx),
&ldap_service->tls_params);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed tstream_tls_params_server - %s\n",
nt_errstr(status)));
goto failed;
}
ldap_service->call_queue = tevent_queue_create(ldap_service, "ldapsrv_call_queue");
if (ldap_service->call_queue == NULL) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
if (lpcfg_interfaces(task->lp_ctx) && lpcfg_bind_interfaces_only(task->lp_ctx)) {
struct interface *ifaces;
int num_interfaces;
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
int i;
load_interface_list(task, task->lp_ctx, &ifaces);
num_interfaces = iface_list_count(ifaces);
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
/* We have been given an interfaces line, and been
told to only bind to those interfaces. Create a
socket per interface and bind to only these.
*/
for(i = 0; i < num_interfaces; i++) {
const char *address = iface_list_n_ip(ifaces, i);
status = add_socket(task, task->lp_ctx, task->model_ops,
address, ldap_service);
if (!NT_STATUS_IS_OK(status)) goto failed;
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
}
} else {
char **wcard;
size_t i;
size_t num_binds = 0;
wcard = iface_list_wildcard(task);
if (wcard == NULL) {
DEBUG(0,("No wildcard addresses available\n"));
status = NT_STATUS_UNSUCCESSFUL;
goto failed;
}
for (i=0; wcard[i]; i++) {
status = add_socket(task, task->lp_ctx, task->model_ops,
wcard[i], ldap_service);
if (NT_STATUS_IS_OK(status)) {
num_binds++;
}
}
talloc_free(wcard);
if (num_binds == 0) {
status = NT_STATUS_UNSUCCESSFUL;
goto failed;
}
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
}
ldapi_path = lpcfg_private_path(ldap_service, task->lp_ctx, "ldapi");
if (!ldapi_path) {
status = NT_STATUS_UNSUCCESSFUL;
goto failed;
}
status = stream_setup_socket(task, task->event_ctx, task->lp_ctx,
task->model_ops, &ldap_stream_nonpriv_ops,
"unix", ldapi_path, NULL,
lpcfg_socket_options(task->lp_ctx),
ldap_service, task->process_context);
talloc_free(ldapi_path);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed to bind to %s - %s\n",
ldapi_path, nt_errstr(status)));
}
#ifdef WITH_LDAPI_PRIV_SOCKET
priv_dir = lpcfg_private_path(ldap_service, task->lp_ctx, "ldap_priv");
if (priv_dir == NULL) {
status = NT_STATUS_UNSUCCESSFUL;
goto failed;
}
/*
* Make sure the directory for the privileged ldapi socket exists, and
* is of the correct permissions
*/
if (!directory_create_or_exist(priv_dir, 0750)) {
task_server_terminate(task, "Cannot create ldap "
"privileged ldapi directory", true);
return NT_STATUS_UNSUCCESSFUL;
}
ldapi_path = talloc_asprintf(ldap_service, "%s/ldapi", priv_dir);
talloc_free(priv_dir);
if (ldapi_path == NULL) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
status = stream_setup_socket(task, task->event_ctx, task->lp_ctx,
task->model_ops, &ldap_stream_priv_ops,
"unix", ldapi_path, NULL,
lpcfg_socket_options(task->lp_ctx),
ldap_service,
task->process_context);
talloc_free(ldapi_path);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("ldapsrv failed to bind to %s - %s\n",
ldapi_path, nt_errstr(status)));
}
#endif
/* register the server */
irpc_add_name(task->msg_ctx, "ldap_server");
return NT_STATUS_OK;
failed:
task_server_terminate(task, "Failed to startup ldap server task", true);
return status;
}
r5102: This is a major simplification of the logic for controlling top level servers in smbd. The old code still contained a fairly bit of legacy from the time when smbd was only handling SMB connection. The new code gets rid of all of the smb_server specific code in smbd/, and creates a much simpler infrastructures for new server code. Major changes include: - simplified the process model code a lot. - got rid of the top level server and service structures completely. The top level context is now the event_context. This got rid of service.h and server.h completely (they were the most confusing parts of the old code) - added service_stream.[ch] for the helper functions that are specific to stream type services (services that handle streams, and use a logically separate process per connection) - got rid of the builtin idle_handler code in the service logic, as none of the servers were using it, and it can easily be handled by a server in future by adding its own timed_event to the event context. - fixed some major memory leaks in the rpc server code. - added registration of servers, rather than hard coding our list of possible servers. This allows for servers as modules in the future. - temporarily disabled the winbind code until I add the helper functions for that type of server - added error checking on service startup. If a configured server fails to startup then smbd doesn't startup. - cleaned up the command line handling in smbd, removing unused options (This used to be commit cf6a46c3cbde7b1eb1b86bd3882b953a2de3a42e)
2005-01-30 03:54:57 +03:00
/*
  register the "ldap" service with the server service framework, using
  ldapsrv_task_init as the task entry point
*/
NTSTATUS server_service_ldap_init(TALLOC_CTX *ctx)
{
	static const struct service_details ldap_service_details = {
		.task_init = ldapsrv_task_init,
		.post_fork = NULL,
		.inhibit_fork_on_accept = false,
		.inhibit_pre_fork = false,
	};

	return register_server_service(ctx, "ldap", &ldap_service_details);
}