/*
 * source3/rpc_client/cli_pipe.c
 * (from the Samba git mirror, https://github.com/samba-team/samba.git)
 */

/*
* Unix SMB/CIFS implementation.
* RPC Pipe client / server routines
* Largely rewritten by Jeremy Allison 2005.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "librpc/gen_ndr/cli_epmapper.h"
#include "../librpc/gen_ndr/ndr_schannel.h"
#include "../librpc/gen_ndr/ndr_dssetup.h"
#include "../librpc/gen_ndr/ndr_netlogon.h"
#include "../libcli/auth/schannel.h"
2009-09-17 02:21:01 +04:00
#include "../libcli/auth/spnego.h"
#include "smb_krb5.h"
#include "../libcli/auth/ntlmssp.h"
#include "ntlmssp_wrap.h"
#include "rpc_client/cli_netlogon.h"
#include "librpc/gen_ndr/ndr_dcerpc.h"
#include "librpc/rpc/dcerpc.h"
#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_CLI
/********************************************************************
Pipe description for a DEBUG
********************************************************************/
static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
				   struct rpc_pipe_client *cli)
{
	char *txt;

	/* Human readable description of the connection for DEBUG output. */
	txt = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
	if (txt != NULL) {
		return txt;
	}

	/* Out of memory - fall back to a static string. */
	return "pipe";
}
/********************************************************************
Rpc pipe call id.
********************************************************************/
static uint32 get_rpc_call_id(void)
{
	/* Monotonically increasing per-process call id.
	 * NOTE(review): wraps to 0 after 2^32 calls - presumably harmless
	 * for a client lifetime, but worth confirming. */
	static uint32 call_id;

	call_id += 1;
	return call_id;
}
/*******************************************************************
Use SMBreadX to get rest of one fragment's worth of rpc data.
Reads the whole size or give an error message
********************************************************************/
2009-01-15 23:56:03 +03:00
struct rpc_read_state {
struct event_context *ev;
struct rpc_cli_transport *transport;
uint8_t *data;
2009-01-15 23:56:03 +03:00
size_t size;
size_t num_read;
};
static void rpc_read_done(struct tevent_req *subreq);
2009-01-15 23:56:03 +03:00
2009-03-23 23:37:27 +03:00
static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
struct event_context *ev,
struct rpc_cli_transport *transport,
uint8_t *data, size_t size)
{
struct tevent_req *req, *subreq;
2009-01-15 23:56:03 +03:00
struct rpc_read_state *state;
2009-03-23 23:37:27 +03:00
req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
if (req == NULL) {
2009-01-15 23:56:03 +03:00
return NULL;
}
state->ev = ev;
state->transport = transport;
2009-01-15 23:56:03 +03:00
state->data = data;
state->size = size;
state->num_read = 0;
DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
subreq = transport->read_send(state, ev, (uint8_t *)data, size,
transport->priv);
if (subreq == NULL) {
goto fail;
2009-01-15 23:56:03 +03:00
}
tevent_req_set_callback(subreq, rpc_read_done, req);
2009-03-23 23:37:27 +03:00
return req;
2009-01-15 23:56:03 +03:00
fail:
2009-03-23 23:37:27 +03:00
TALLOC_FREE(req);
2009-01-15 23:56:03 +03:00
return NULL;
}
static void rpc_read_done(struct tevent_req *subreq)
2009-01-15 23:56:03 +03:00
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
2009-03-23 23:37:27 +03:00
struct rpc_read_state *state = tevent_req_data(
req, struct rpc_read_state);
2009-01-15 23:56:03 +03:00
NTSTATUS status;
ssize_t received;
status = state->transport->read_recv(subreq, &received);
TALLOC_FREE(subreq);
2009-01-15 23:56:03 +03:00
if (!NT_STATUS_IS_OK(status)) {
2009-03-23 23:37:27 +03:00
tevent_req_nterror(req, status);
2009-01-15 23:56:03 +03:00
return;
}
state->num_read += received;
if (state->num_read == state->size) {
2009-03-23 23:37:27 +03:00
tevent_req_done(req);
2009-01-15 23:56:03 +03:00
return;
}
subreq = state->transport->read_send(state, state->ev,
state->data + state->num_read,
state->size - state->num_read,
state->transport->priv);
2009-03-23 23:37:27 +03:00
if (tevent_req_nomem(subreq, req)) {
2009-01-15 23:56:03 +03:00
return;
}
tevent_req_set_callback(subreq, rpc_read_done, req);
2009-01-15 23:56:03 +03:00
}
2009-03-23 23:37:27 +03:00
/* Collect the result of rpc_read_send(). */
static NTSTATUS rpc_read_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
2009-01-17 17:07:52 +03:00
struct rpc_write_state {
struct event_context *ev;
struct rpc_cli_transport *transport;
const uint8_t *data;
2009-01-17 17:07:52 +03:00
size_t size;
size_t num_written;
};
static void rpc_write_done(struct tevent_req *subreq);
2009-01-17 17:07:52 +03:00
2009-03-23 23:49:19 +03:00
static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
struct event_context *ev,
struct rpc_cli_transport *transport,
const uint8_t *data, size_t size)
2009-01-17 17:07:52 +03:00
{
struct tevent_req *req, *subreq;
2009-01-17 17:07:52 +03:00
struct rpc_write_state *state;
2009-03-23 23:49:19 +03:00
req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
if (req == NULL) {
2009-01-17 17:07:52 +03:00
return NULL;
}
state->ev = ev;
state->transport = transport;
2009-01-17 17:07:52 +03:00
state->data = data;
state->size = size;
state->num_written = 0;
DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
subreq = transport->write_send(state, ev, data, size, transport->priv);
if (subreq == NULL) {
goto fail;
2009-01-17 17:07:52 +03:00
}
tevent_req_set_callback(subreq, rpc_write_done, req);
2009-03-23 23:49:19 +03:00
return req;
2009-01-17 17:07:52 +03:00
fail:
2009-03-23 23:49:19 +03:00
TALLOC_FREE(req);
2009-01-17 17:07:52 +03:00
return NULL;
}
static void rpc_write_done(struct tevent_req *subreq)
2009-01-17 17:07:52 +03:00
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
2009-03-23 23:49:19 +03:00
struct rpc_write_state *state = tevent_req_data(
req, struct rpc_write_state);
2009-01-17 17:07:52 +03:00
NTSTATUS status;
ssize_t written;
2009-01-17 17:07:52 +03:00
status = state->transport->write_recv(subreq, &written);
2009-01-17 17:07:52 +03:00
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-23 23:49:19 +03:00
tevent_req_nterror(req, status);
2009-01-17 17:07:52 +03:00
return;
}
state->num_written += written;
if (state->num_written == state->size) {
2009-03-23 23:49:19 +03:00
tevent_req_done(req);
2009-01-17 17:07:52 +03:00
return;
}
subreq = state->transport->write_send(state, state->ev,
state->data + state->num_written,
state->size - state->num_written,
state->transport->priv);
2009-03-23 23:49:19 +03:00
if (tevent_req_nomem(subreq, req)) {
2009-01-17 17:07:52 +03:00
return;
}
tevent_req_set_callback(subreq, rpc_write_done, req);
2009-01-17 17:07:52 +03:00
}
2009-03-23 23:49:19 +03:00
/* Collect the result of rpc_write_send(). */
static NTSTATUS rpc_write_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
/****************************************************************************
Try and get a PDU's worth of data from current_pdu. If not, then read more
from the wire.
****************************************************************************/
/*
 * State for assembling one complete DCERPC fragment in *pdu.
 */
struct get_complete_frag_state {
	struct event_context *ev;
	struct rpc_pipe_client *cli;
	uint16_t frag_len;	/* total fragment length from the header */
	DATA_BLOB *pdu;		/* caller-owned buffer being filled */
};

static void get_complete_frag_got_header(struct tevent_req *subreq);
static void get_complete_frag_got_rest(struct tevent_req *subreq);

/*
 * Ensure *pdu holds one complete fragment: read the fixed-size RPC header
 * first (if not already present) to learn frag_length, then read the rest.
 * The frag_length field comes from the server and is untrusted, so it is
 * validated against RPC_HEADER_LEN before use.
 */
static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
						 struct event_context *ev,
						 struct rpc_pipe_client *cli,
						 DATA_BLOB *pdu)
{
	struct tevent_req *req, *subreq;
	struct get_complete_frag_state *state;
	size_t received;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state,
				struct get_complete_frag_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->frag_len = RPC_HEADER_LEN;
	state->pdu = pdu;

	received = pdu->length;
	if (received < RPC_HEADER_LEN) {
		/* We don't even have a full header yet - fetch the rest
		 * of it before we can learn the fragment length. */
		if (!data_blob_realloc(mem_ctx, pdu, RPC_HEADER_LEN)) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq = rpc_read_send(state, state->ev,
					state->cli->transport,
					pdu->data + received,
					RPC_HEADER_LEN - received);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, get_complete_frag_got_header,
					req);
		return req;
	}

	state->frag_len = dcerpc_get_frag_length(pdu);
	if (state->frag_len < RPC_HEADER_LEN) {
		/* A fragment can never be shorter than its own header -
		 * reject malformed server data early. */
		status = NT_STATUS_RPC_PROTOCOL_ERROR;
		goto post_status;
	}

	/*
	 * Ensure we have frag_len bytes of data.
	 */
	if (received < state->frag_len) {
		/* NOTE(review): this realloc uses a NULL talloc context
		 * while the header branch above uses mem_ctx - confirm
		 * the intended ownership. */
		if (!data_blob_realloc(NULL, pdu, state->frag_len)) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq = rpc_read_send(state, state->ev,
					state->cli->transport,
					pdu->data + received,
					state->frag_len - received);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, get_complete_frag_got_rest,
					req);
		return req;
	}

	/* The whole fragment was already in the buffer. */
	status = NT_STATUS_OK;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
}
2009-03-23 23:37:27 +03:00
static void get_complete_frag_got_header(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct get_complete_frag_state *state = tevent_req_data(
req, struct get_complete_frag_state);
NTSTATUS status;
status = rpc_read_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
return;
}
state->frag_len = dcerpc_get_frag_length(state->pdu);
if (!data_blob_realloc(NULL, state->pdu, state->frag_len)) {
tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
/*
* We're here in this piece of code because we've read exactly
* RPC_HEADER_LEN bytes into state->pdu.
*/
subreq = rpc_read_send(state, state->ev, state->cli->transport,
state->pdu->data + RPC_HEADER_LEN,
state->frag_len - RPC_HEADER_LEN);
if (tevent_req_nomem(subreq, req)) {
return;
}
2009-03-23 23:37:27 +03:00
tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
}
2009-03-23 23:37:27 +03:00
static void get_complete_frag_got_rest(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	NTSTATUS status;

	/* The remainder of the fragment has arrived (or failed). */
	status = rpc_read_recv(subreq);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}
	tevent_req_done(req);
}
/* Collect the result of get_complete_frag_send(). */
static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
/****************************************************************************
Do basic authentication checks on an incoming pdu.
****************************************************************************/
static NTSTATUS cli_pipe_validate_current_pdu(TALLOC_CTX *mem_ctx,
					      struct rpc_pipe_client *cli,
					      struct ncacn_packet *pkt,
					      DATA_BLOB *pdu,
					      uint8_t expected_pkt_type,
					      DATA_BLOB *rdata,
					      DATA_BLOB *reply_pdu)
{
	size_t pad_len = 0;
	NTSTATUS ret;

	/* Parse the wire PDU into *pkt. */
	ret = dcerpc_pull_ncacn_packet(cli, pdu, pkt, false);
	if (!NT_STATUS_IS_OK(ret)) {
		return ret;
	}

	if (pdu->length != pkt->frag_length) {
		DEBUG(5, ("Incorrect pdu length %u, expected %u\n",
			  (unsigned int)pdu->length,
			  (unsigned int)pkt->frag_length));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/*
	 * Point the return values at the real data including the RPC
	 * header. Just in case the caller wants it.
	 */
	*rdata = *pdu;

	/* Ensure we have the correct type. */
	switch (pkt->ptype) {
	case DCERPC_PKT_ALTER_RESP:
	case DCERPC_PKT_BIND_ACK:
		/* Alter context and bind ack share the same packet
		 * definitions. */
		break;

	case DCERPC_PKT_RESPONSE:

		/* Here's where we deal with incoming sign/seal. */
		ret = dcerpc_check_auth(cli->auth, pkt,
					&pkt->u.response.stub_and_verifier,
					DCERPC_RESPONSE_LENGTH,
					pdu, &pad_len);
		if (!NT_STATUS_IS_OK(ret)) {
			return ret;
		}

		if (pdu->length < DCERPC_RESPONSE_LENGTH + pad_len) {
			return NT_STATUS_BUFFER_TOO_SMALL;
		}

		/* Point the return values at the NDR data. */
		rdata->data = pdu->data + DCERPC_RESPONSE_LENGTH;

		if (pkt->auth_length) {
			/* We've already done integer wrap tests in
			 * dcerpc_check_auth(). */
			rdata->length = pdu->length
					 - DCERPC_RESPONSE_LENGTH
					 - pad_len
					 - DCERPC_AUTH_TRAILER_LENGTH
					 - pkt->auth_length;
		} else {
			rdata->length = pdu->length - DCERPC_RESPONSE_LENGTH;
		}

		DEBUG(10, ("Got pdu len %lu, data_len %lu, ss_len %u\n",
			   (long unsigned int)pdu->length,
			   (long unsigned int)rdata->length,
			   (unsigned int)pad_len));

		/*
		 * If this is the first reply, and the allocation hint is
		 * reasonable, try and set up the reply_pdu DATA_BLOB to the
		 * correct size.
		 */
		if ((reply_pdu->length == 0) &&
		    pkt->u.response.alloc_hint &&
		    (pkt->u.response.alloc_hint < 15*1024*1024)) {
			if (!data_blob_realloc(mem_ctx, reply_pdu,
					       pkt->u.response.alloc_hint)) {
				DEBUG(0, ("reply alloc hint %d too "
					  "large to allocate\n",
					  (int)pkt->u.response.alloc_hint));
				return NT_STATUS_NO_MEMORY;
			}
		}

		break;

	case DCERPC_PKT_BIND_NAK:
		DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
			  "received from %s!\n",
			  rpccli_pipe_txt(talloc_tos(), cli)));
		/* Use this for now... */
		return NT_STATUS_NETWORK_ACCESS_DENIED;

	case DCERPC_PKT_FAULT:

		DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
			  "code %s received from %s!\n",
			  dcerpc_errstr(talloc_tos(),
					pkt->u.fault.status),
			  rpccli_pipe_txt(talloc_tos(), cli)));

		/* A fault status of 0 would masquerade as success -
		 * map it to a generic failure. */
		if (NT_STATUS_IS_OK(NT_STATUS(pkt->u.fault.status))) {
			return NT_STATUS_UNSUCCESSFUL;
		} else {
			return NT_STATUS(pkt->u.fault.status);
		}

	default:
		DEBUG(0, ("Unknown packet type %u received from %s!\n",
			  (unsigned int)pkt->ptype,
			  rpccli_pipe_txt(talloc_tos(), cli)));
		return NT_STATUS_INVALID_INFO_CLASS;
	}

	if (pkt->ptype != expected_pkt_type) {
		DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
			  "got an unexpected RPC packet type - %u, not %u\n",
			  rpccli_pipe_txt(talloc_tos(), cli),
			  pkt->ptype,
			  expected_pkt_type));
		return NT_STATUS_INVALID_INFO_CLASS;
	}

	/* Do this just before return - we don't want to modify any rpc header
	   data before now as we may have needed to do cryptographic actions on
	   it before. */
	if ((pkt->ptype == DCERPC_PKT_BIND_ACK) &&
	    !(pkt->pfc_flags & DCERPC_PFC_FLAG_LAST)) {
		DEBUG(5, ("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
			  "setting fragment first/last ON.\n"));
		pkt->pfc_flags |= DCERPC_PFC_FLAG_FIRST |
				  DCERPC_PFC_FLAG_LAST;
	}

	return NT_STATUS_OK;
}
/****************************************************************************
Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
****************************************************************************/
struct cli_api_pipe_state {
struct event_context *ev;
struct rpc_cli_transport *transport;
uint8_t *rdata;
uint32_t rdata_len;
};
static void cli_api_pipe_trans_done(struct tevent_req *subreq);
2009-03-23 23:49:19 +03:00
static void cli_api_pipe_write_done(struct tevent_req *subreq);
static void cli_api_pipe_read_done(struct tevent_req *subreq);
2009-03-24 00:13:44 +03:00
static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
struct event_context *ev,
struct rpc_cli_transport *transport,
uint8_t *data, size_t data_len,
uint32_t max_rdata_len)
{
struct tevent_req *req, *subreq;
struct cli_api_pipe_state *state;
NTSTATUS status;
2009-03-24 00:13:44 +03:00
req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
if (req == NULL) {
return NULL;
}
state->ev = ev;
state->transport = transport;
if (max_rdata_len < RPC_HEADER_LEN) {
/*
* For a RPC reply we always need at least RPC_HEADER_LEN
* bytes. We check this here because we will receive
* RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
*/
status = NT_STATUS_INVALID_PARAMETER;
goto post_status;
}
if (transport->trans_send != NULL) {
subreq = transport->trans_send(state, ev, data, data_len,
max_rdata_len, transport->priv);
if (subreq == NULL) {
goto fail;
}
tevent_req_set_callback(subreq, cli_api_pipe_trans_done, req);
2009-03-24 00:13:44 +03:00
return req;
}
/*
* If the transport does not provide a "trans" routine, i.e. for
* example the ncacn_ip_tcp transport, do the write/read step here.
*/
subreq = rpc_write_send(state, ev, transport, data, data_len);
if (subreq == NULL) {
goto fail;
}
tevent_req_set_callback(subreq, cli_api_pipe_write_done, req);
2009-03-24 00:13:44 +03:00
return req;
post_status:
tevent_req_nterror(req, status);
2009-03-24 00:13:44 +03:00
return tevent_req_post(req, ev);
fail:
2009-03-24 00:13:44 +03:00
TALLOC_FREE(req);
return NULL;
}
static void cli_api_pipe_trans_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
2009-03-24 00:13:44 +03:00
struct cli_api_pipe_state *state = tevent_req_data(
req, struct cli_api_pipe_state);
NTSTATUS status;
status = state->transport->trans_recv(subreq, state, &state->rdata,
&state->rdata_len);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 00:13:44 +03:00
tevent_req_nterror(req, status);
return;
}
2009-03-24 00:13:44 +03:00
tevent_req_done(req);
}
2009-03-23 23:49:19 +03:00
static void cli_api_pipe_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_api_pipe_state *state = tevent_req_data(
		req, struct cli_api_pipe_state);
	NTSTATUS status;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	/* Request is on the wire - set up a buffer for the reply header. */
	state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
	if (tevent_req_nomem(state->rdata, req)) {
		return;
	}

	/*
	 * We don't need to use rpc_read_send here, the upper layer will cope
	 * with a short read, transport->trans_send could also return less
	 * than state->max_rdata_len.
	 */
	subreq = state->transport->read_send(state, state->ev, state->rdata,
					     RPC_HEADER_LEN,
					     state->transport->priv);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_api_pipe_read_done, req);
}
static void cli_api_pipe_read_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
2009-03-24 00:13:44 +03:00
struct cli_api_pipe_state *state = tevent_req_data(
req, struct cli_api_pipe_state);
NTSTATUS status;
ssize_t received;
status = state->transport->read_recv(subreq, &received);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 00:13:44 +03:00
tevent_req_nterror(req, status);
return;
}
state->rdata_len = received;
2009-03-24 00:13:44 +03:00
tevent_req_done(req);
}
2009-03-24 00:13:44 +03:00
/*
 * Collect the reply of cli_api_pipe_send(). Ownership of *prdata moves
 * to mem_ctx.
 */
static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
				  uint8_t **prdata, uint32_t *prdata_len)
{
	struct cli_api_pipe_state *state = tevent_req_data(
		req, struct cli_api_pipe_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	*prdata = talloc_move(mem_ctx, &state->rdata);
	*prdata_len = state->rdata_len;
	return NT_STATUS_OK;
}
/****************************************************************************
Send data on an rpc pipe via trans. The data must be the last
pdu fragment of an NDR data stream.
Receive response data from an rpc pipe, which may be large...
Read the first fragment: unfortunately have to use SMBtrans for the first
bit, then SMBreadX for subsequent bits.
If first fragment received also wasn't the last fragment, continue
getting fragments until we _do_ receive the last fragment.
Request/Response PDU's look like the following...
|<------------------PDU len----------------------------------------------->|
|<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
+------------+-----------------+-------------+---------------+-------------+
| RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
+------------+-----------------+-------------+---------------+-------------+
Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
signing & sealing being negotiated.
****************************************************************************/
2009-01-17 15:33:34 +03:00
struct rpc_api_pipe_state {
struct event_context *ev;
struct rpc_pipe_client *cli;
uint8_t expected_pkt_type;
DATA_BLOB incoming_frag;
struct ncacn_packet *pkt;
2009-01-17 15:33:34 +03:00
/* Incoming reply */
DATA_BLOB reply_pdu;
size_t reply_pdu_offset;
uint8_t endianess;
2009-01-17 15:33:34 +03:00
};
2009-03-24 00:13:44 +03:00
static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
2009-01-17 15:33:34 +03:00
2009-03-24 00:33:00 +03:00
static struct tevent_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
struct event_context *ev,
struct rpc_pipe_client *cli,
DATA_BLOB *data, /* Outgoing PDU */
2009-03-24 00:33:00 +03:00
uint8_t expected_pkt_type)
2009-01-17 15:33:34 +03:00
{
2009-03-24 00:33:00 +03:00
struct tevent_req *req, *subreq;
2009-01-17 15:33:34 +03:00
struct rpc_api_pipe_state *state;
uint16_t max_recv_frag;
2009-01-17 15:33:34 +03:00
NTSTATUS status;
2009-03-24 00:33:00 +03:00
req = tevent_req_create(mem_ctx, &state, struct rpc_api_pipe_state);
if (req == NULL) {
2009-01-17 15:33:34 +03:00
return NULL;
}
state->ev = ev;
state->cli = cli;
state->expected_pkt_type = expected_pkt_type;
state->incoming_frag = data_blob_null;
state->reply_pdu = data_blob_null;
state->reply_pdu_offset = 0;
state->endianess = DCERPC_DREP_LE;
2009-01-17 15:33:34 +03:00
/*
* Ensure we're not sending too much.
*/
if (data->length > cli->max_xmit_frag) {
2009-01-17 15:33:34 +03:00
status = NT_STATUS_INVALID_PARAMETER;
goto post_status;
}
DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(talloc_tos(), cli)));
2009-01-17 15:33:34 +03:00
/* get the header first, then fetch the rest once we have
* the frag_length available */
max_recv_frag = RPC_HEADER_LEN;
subreq = cli_api_pipe_send(state, ev, cli->transport,
data->data, data->length, max_recv_frag);
2009-01-17 15:33:34 +03:00
if (subreq == NULL) {
2009-03-24 00:33:00 +03:00
goto fail;
2009-01-17 15:33:34 +03:00
}
2009-03-24 00:33:00 +03:00
tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, req);
return req;
2009-01-17 15:33:34 +03:00
post_status:
2009-03-24 00:33:00 +03:00
tevent_req_nterror(req, status);
return tevent_req_post(req, ev);
fail:
TALLOC_FREE(req);
2009-01-17 15:33:34 +03:00
return NULL;
}
2009-03-24 00:13:44 +03:00
static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
2009-01-17 15:33:34 +03:00
{
2009-03-24 00:33:00 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct rpc_api_pipe_state *state = tevent_req_data(
req, struct rpc_api_pipe_state);
2009-01-17 15:33:34 +03:00
NTSTATUS status;
uint8_t *rdata = NULL;
uint32_t rdata_len = 0;
status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
2009-03-24 00:33:00 +03:00
tevent_req_nterror(req, status);
2009-01-17 15:33:34 +03:00
return;
}
if (rdata == NULL) {
DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
rpccli_pipe_txt(talloc_tos(), state->cli)));
2009-03-24 00:33:00 +03:00
tevent_req_done(req);
2009-01-17 15:33:34 +03:00
return;
}
/*
* Move data on state->incoming_frag.
2009-01-17 15:33:34 +03:00
*/
state->incoming_frag.data = talloc_move(state, &rdata);
state->incoming_frag.length = rdata_len;
if (!state->incoming_frag.data) {
tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
2009-01-17 15:33:34 +03:00
/* Ensure we have enough data for a pdu. */
2009-03-24 00:13:44 +03:00
subreq = get_complete_frag_send(state, state->ev, state->cli,
&state->incoming_frag);
2009-03-24 00:33:00 +03:00
if (tevent_req_nomem(subreq, req)) {
2009-01-17 15:33:34 +03:00
return;
}
2009-03-24 00:13:44 +03:00
tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
2009-01-17 15:33:34 +03:00
}
static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
2009-01-17 15:33:34 +03:00
{
2009-03-24 00:33:00 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct rpc_api_pipe_state *state = tevent_req_data(
req, struct rpc_api_pipe_state);
2009-01-17 15:33:34 +03:00
NTSTATUS status;
DATA_BLOB rdata = data_blob_null;
2009-01-17 15:33:34 +03:00
status = get_complete_frag_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(5, ("get_complete_frag failed: %s\n",
nt_errstr(status)));
2009-03-24 00:33:00 +03:00
tevent_req_nterror(req, status);
2009-01-17 15:33:34 +03:00
return;
}
state->pkt = talloc(state, struct ncacn_packet);
if (!state->pkt) {
tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
status = cli_pipe_validate_current_pdu(state,
state->cli, state->pkt,
&state->incoming_frag,
state->expected_pkt_type,
&rdata,
&state->reply_pdu);
2009-01-17 15:33:34 +03:00
DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
(unsigned)state->incoming_frag.length,
(unsigned)state->reply_pdu_offset,
2009-01-17 15:33:34 +03:00
nt_errstr(status)));
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 00:33:00 +03:00
tevent_req_nterror(req, status);
2009-01-17 15:33:34 +03:00
return;
}
if ((state->pkt->pfc_flags & DCERPC_PFC_FLAG_FIRST)
&& (state->pkt->drep[0] != DCERPC_DREP_LE)) {
2009-01-17 15:33:34 +03:00
/*
* Set the data type correctly for big-endian data on the
* first packet.
*/
DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
"big-endian.\n",
rpccli_pipe_txt(talloc_tos(), state->cli)));
state->endianess = 0x00; /* BIG ENDIAN */
2009-01-17 15:33:34 +03:00
}
/*
* Check endianness on subsequent packets.
*/
if (state->endianess != state->pkt->drep[0]) {
2009-01-17 15:33:34 +03:00
DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
"%s\n",
state->endianess?"little":"big",
state->pkt->drep[0]?"little":"big"));
2009-03-24 00:33:00 +03:00
tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2009-01-17 15:33:34 +03:00
return;
}
/* Now copy the data portion out of the pdu into rbuf. */
if (state->reply_pdu.length < state->reply_pdu_offset + rdata.length) {
if (!data_blob_realloc(NULL, &state->reply_pdu,
state->reply_pdu_offset + rdata.length)) {
tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
2009-01-17 15:33:34 +03:00
}
memcpy(state->reply_pdu.data + state->reply_pdu_offset,
rdata.data, rdata.length);
state->reply_pdu_offset += rdata.length;
2009-01-17 15:33:34 +03:00
/* reset state->incoming_frag, there is no need to free it,
* it will be reallocated to the right size the next time
* it is used */
state->incoming_frag.length = 0;
2009-01-17 15:33:34 +03:00
if (state->pkt->pfc_flags & DCERPC_PFC_FLAG_LAST) {
/* make sure the pdu length is right now that we
* have all the data available (alloc hint may
* have allocated more than was actually used) */
state->reply_pdu.length = state->reply_pdu_offset;
2009-01-17 15:33:34 +03:00
DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
rpccli_pipe_txt(talloc_tos(), state->cli),
(unsigned)state->reply_pdu.length));
2009-03-24 00:33:00 +03:00
tevent_req_done(req);
2009-01-17 15:33:34 +03:00
return;
}
subreq = get_complete_frag_send(state, state->ev, state->cli,
&state->incoming_frag);
2009-03-24 00:33:00 +03:00
if (tevent_req_nomem(subreq, req)) {
2009-01-17 15:33:34 +03:00
return;
}
tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
2009-01-17 15:33:34 +03:00
}
2009-03-24 00:33:00 +03:00
/*
 * Collect the result of rpc_api_pipe_send(). Both out parameters are
 * optional; whatever is taken is reparented onto mem_ctx.
 */
static NTSTATUS rpc_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
				  struct ncacn_packet **pkt,
				  DATA_BLOB *reply_pdu)
{
	struct rpc_api_pipe_state *state = tevent_req_data(
		req, struct rpc_api_pipe_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	/* return data to caller and assign it ownership of memory */
	if (reply_pdu != NULL) {
		reply_pdu->data = talloc_move(mem_ctx, &state->reply_pdu.data);
		reply_pdu->length = state->reply_pdu.length;
		state->reply_pdu.length = 0;
	} else {
		data_blob_free(&state->reply_pdu);
	}

	if (pkt != NULL) {
		*pkt = talloc_steal(mem_ctx, state->pkt);
	}

	return NT_STATUS_OK;
}
/*******************************************************************
Creates krb5 auth bind.
********************************************************************/
/*
 * Build the DCERPC auth trailer for a KRB5 bind: obtain a service
 * ticket for the pipe's principal, GSS-API wrap it, and push it as a
 * dcerpc_auth structure into *auth_info. Compiled to a stub returning
 * NT_STATUS_INVALID_PARAMETER when krb5 support is absent.
 */
static NTSTATUS create_krb5_auth_bind_req(struct rpc_pipe_client *cli,
					  enum dcerpc_AuthLevel auth_level,
					  DATA_BLOB *auth_info)
{
#ifdef HAVE_KRB5
	struct kerberos_auth_struct *ka = cli->auth->a_u.kerberos_auth;
	DATA_BLOB ticket = data_blob_null;
	DATA_BLOB wrapped = data_blob_null;
	NTSTATUS status;
	int krb_err;

	DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
		ka->service_principal ));

	/* Ask the KDC for a service ticket; also yields the session key. */
	krb_err = cli_krb5_get_ticket(ka, ka->service_principal, 0,
				      &ticket, &ka->session_key,
				      AP_OPTS_MUTUAL_REQUIRED, NULL,
				      NULL, NULL);
	if (krb_err != 0) {
		DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
			"failed with %s\n",
			ka->service_principal,
			error_message(krb_err) ));
		data_blob_free(&ticket);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Wrap the raw AP-REQ in a GSS-API token. */
	wrapped = spnego_gen_krb5_wrap(talloc_tos(), ticket, TOK_ID_KRB_AP_REQ);
	data_blob_free(&ticket);

	status = dcerpc_push_dcerpc_auth(cli,
					 DCERPC_AUTH_TYPE_KRB5,
					 auth_level,
					 0, /* auth_pad_length */
					 1, /* auth_context_id */
					 &wrapped,
					 auth_info);
	if (!NT_STATUS_IS_OK(status)) {
		data_blob_free(&wrapped);
		return status;
	}

	DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
	dump_data(5, wrapped.data, wrapped.length);

	return NT_STATUS_OK;
#else
	return NT_STATUS_INVALID_PARAMETER;
#endif
}
/*******************************************************************
Creates SPNEGO NTLMSSP auth bind.
********************************************************************/
/*
 * Build the auth trailer for a SPNEGO-wrapped NTLMSSP bind: run the
 * first NTLMSSP leg to get a Negotiate token, wrap it in a SPNEGO
 * negTokenInit, and push the result into *auth_info.
 */
static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req(struct rpc_pipe_client *cli,
					enum dcerpc_AuthLevel auth_level,
					DATA_BLOB *auth_info)
{
	const char *OIDs_ntlm[] = {OID_NTLMSSP, NULL};
	DATA_BLOB empty = data_blob_null;
	DATA_BLOB ntlmssp_neg = data_blob_null;
	DATA_BLOB spnego_msg = data_blob_null;
	NTSTATUS status;

	DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));

	/* First handshake leg: NTLMSSP must ask for more processing. */
	status = auth_ntlmssp_update(cli->auth->a_u.auth_ntlmssp_state,
				     empty, &ntlmssp_neg);
	if (!NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
		data_blob_free(&ntlmssp_neg);
		return status;
	}

	/* Encapsulate the Negotiate token in a SPNEGO negTokenInit. */
	spnego_msg = spnego_gen_negTokenInit(talloc_tos(), OIDs_ntlm,
					     &ntlmssp_neg, NULL);
	data_blob_free(&ntlmssp_neg);

	status = dcerpc_push_dcerpc_auth(cli,
					 DCERPC_AUTH_TYPE_SPNEGO,
					 auth_level,
					 0, /* auth_pad_length */
					 1, /* auth_context_id */
					 &spnego_msg,
					 auth_info);
	if (!NT_STATUS_IS_OK(status)) {
		data_blob_free(&spnego_msg);
		return status;
	}

	DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
	dump_data(5, spnego_msg.data, spnego_msg.length);
	data_blob_free(&spnego_msg);

	return NT_STATUS_OK;
}
/*******************************************************************
Creates NTLMSSP auth bind.
********************************************************************/
/*
 * Build the auth trailer for a raw (non-SPNEGO) NTLMSSP bind: run the
 * first NTLMSSP leg and push the Negotiate token into *auth_info.
 */
static NTSTATUS create_ntlmssp_auth_rpc_bind_req(struct rpc_pipe_client *cli,
						 enum dcerpc_AuthLevel auth_level,
						 DATA_BLOB *auth_info)
{
	DATA_BLOB empty = data_blob_null;
	DATA_BLOB negotiate = data_blob_null;
	NTSTATUS status;

	DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));

	/* First handshake leg: NTLMSSP must ask for more processing. */
	status = auth_ntlmssp_update(cli->auth->a_u.auth_ntlmssp_state,
				     empty, &negotiate);
	if (!NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
		data_blob_free(&negotiate);
		return status;
	}

	status = dcerpc_push_dcerpc_auth(cli,
					 DCERPC_AUTH_TYPE_NTLMSSP,
					 auth_level,
					 0, /* auth_pad_length */
					 1, /* auth_context_id */
					 &negotiate,
					 auth_info);
	if (!NT_STATUS_IS_OK(status)) {
		data_blob_free(&negotiate);
		return status;
	}

	DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
	dump_data(5, negotiate.data, negotiate.length);

	return NT_STATUS_OK;
}
/*******************************************************************
Creates schannel auth bind.
********************************************************************/
/*
 * Build the auth trailer for a schannel (netlogon secure channel)
 * bind: marshall an NL_AUTH negotiate message carrying our domain and
 * NetBIOS names and push it into *auth_info.
 */
static NTSTATUS create_schannel_auth_rpc_bind_req(struct rpc_pipe_client *cli,
						  enum dcerpc_AuthLevel auth_level,
						  DATA_BLOB *auth_info)
{
	struct NL_AUTH_MESSAGE msg;
	DATA_BLOB blob;
	NTSTATUS status;

	/* Fall back to lp_workgroup() when no domain was given. */
	if (!cli->auth->domain || !cli->auth->domain[0]) {
		cli->auth->domain = talloc_strdup(cli, lp_workgroup());
		if (cli->auth->domain == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
	}

	/* Build the NL_AUTH negotiate message. */
	msg.MessageType = NL_NEGOTIATE_REQUEST;
	msg.Flags = NL_FLAG_OEM_NETBIOS_DOMAIN_NAME |
		    NL_FLAG_OEM_NETBIOS_COMPUTER_NAME;
	msg.oem_netbios_domain.a = cli->auth->domain;
	msg.oem_netbios_computer.a = global_myname();

	status = dcerpc_push_schannel_bind(cli, &msg, &blob);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	status = dcerpc_push_dcerpc_auth(cli,
					 DCERPC_AUTH_TYPE_SCHANNEL,
					 auth_level,
					 0, /* auth_pad_length */
					 1, /* auth_context_id */
					 &blob,
					 auth_info);
	return status;
}
/*******************************************************************
Creates the internals of a DCE/RPC bind request or alter context PDU.
********************************************************************/
/*
 * Marshall the shared interior of a BIND or ALTER_CONTEXT PDU:
 * a single presentation context (one abstract syntax, one transfer
 * syntax) plus an optional pre-built auth trailer.
 */
static NTSTATUS create_bind_or_alt_ctx_internal(TALLOC_CTX *mem_ctx,
						enum dcerpc_pkt_type ptype,
						uint32 rpc_call_id,
						const struct ndr_syntax_id *abstract,
						const struct ndr_syntax_id *transfer,
						const DATA_BLOB *auth_info,
						DATA_BLOB *blob)
{
	union dcerpc_payload u;
	struct dcerpc_ctx_list ctx_list;
	uint16 auth_len = auth_info->length;
	NTSTATUS status;

	/* The on-wire auth_length excludes the dcerpc_auth header. */
	if (auth_len != 0) {
		auth_len -= DCERPC_AUTH_TRAILER_LENGTH;
	}

	/* Offer exactly one presentation context. */
	ctx_list.context_id = 0;
	ctx_list.num_transfer_syntaxes = 1;
	ctx_list.abstract_syntax = *abstract;
	ctx_list.transfer_syntaxes = (struct ndr_syntax_id *)discard_const(transfer);

	u.bind.max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	u.bind.max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
	u.bind.assoc_group_id = 0x0;
	u.bind.num_contexts = 1;
	u.bind.ctx_list = &ctx_list;
	u.bind.auth_info = *auth_info;

	status = dcerpc_push_ncacn_packet(mem_ctx,
					  ptype,
					  DCERPC_PFC_FLAG_FIRST |
					  DCERPC_PFC_FLAG_LAST,
					  auth_len,
					  rpc_call_id,
					  &u,
					  blob);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("Failed to marshall bind/alter ncacn_packet.\n"));
	}

	return status;
}
/*******************************************************************
Creates a DCE/RPC bind request.
********************************************************************/
/*
 * Create a complete BIND PDU in *rpc_out. Dispatches on the
 * negotiated auth mechanism to build the auth trailer (empty for
 * anonymous binds), then marshalls the full packet.
 */
static NTSTATUS create_rpc_bind_req(TALLOC_CTX *mem_ctx,
				    struct rpc_pipe_client *cli,
				    struct pipe_auth_data *auth,
				    uint32 rpc_call_id,
				    const struct ndr_syntax_id *abstract,
				    const struct ndr_syntax_id *transfer,
				    DATA_BLOB *rpc_out)
{
	DATA_BLOB auth_info = data_blob_null;
	NTSTATUS status = NT_STATUS_OK;

	/* Build the auth trailer that matches the requested mechanism. */
	switch (auth->auth_type) {
	case DCERPC_AUTH_TYPE_SCHANNEL:
		status = create_schannel_auth_rpc_bind_req(cli,
							   auth->auth_level,
							   &auth_info);
		break;

	case DCERPC_AUTH_TYPE_NTLMSSP:
		status = create_ntlmssp_auth_rpc_bind_req(cli,
							  auth->auth_level,
							  &auth_info);
		break;

	case DCERPC_AUTH_TYPE_SPNEGO:
		if (auth->spnego_type != PIPE_AUTH_TYPE_SPNEGO_NTLMSSP) {
			/* "Can't" happen - only NTLMSSP inside SPNEGO. */
			return NT_STATUS_INVALID_INFO_CLASS;
		}
		status = create_spnego_ntlmssp_auth_rpc_bind_req(cli,
								 auth->auth_level,
								 &auth_info);
		break;

	case DCERPC_AUTH_TYPE_KRB5:
		status = create_krb5_auth_bind_req(cli,
						   auth->auth_level,
						   &auth_info);
		break;

	case DCERPC_AUTH_TYPE_NONE:
		/* Anonymous bind: leave the auth trailer empty. */
		break;

	default:
		/* "Can't" happen. */
		return NT_STATUS_INVALID_INFO_CLASS;
	}

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	return create_bind_or_alt_ctx_internal(mem_ctx,
					       DCERPC_PKT_BIND,
					       rpc_call_id,
					       abstract,
					       transfer,
					       &auth_info,
					       rpc_out);
}
/*******************************************************************
Calculate how much data we're going to send in this packet, also
work out any sign/seal padding length.
********************************************************************/
/*
 * Work out how many payload bytes to put in the next request PDU, and
 * return via the out parameters the resulting fragment length, the
 * auth footer length and the sign/seal NDR padding needed.
 *
 * Returns the number of payload bytes to send in this fragment.
 * Panics on an unknown auth type/level ("can't happen" by
 * construction of cli->auth).
 */
static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
					uint32 data_left,
					uint16 *p_frag_len,
					uint16 *p_auth_len,
					uint32 *p_ss_padding)
{
	uint32 data_space, data_len;

#if 0
	/* Fragment-size fuzzing hook, disabled by default. */
	if ((data_left > 0) && (sys_random() % 2)) {
		data_left = MAX(data_left/2, 1);
	}
#endif

	switch (cli->auth->auth_level) {
	case DCERPC_AUTH_LEVEL_NONE:
	case DCERPC_AUTH_LEVEL_CONNECT:
		/* No per-PDU auth footer, no padding. */
		data_space = cli->max_xmit_frag - DCERPC_REQUEST_LENGTH;
		data_len = MIN(data_space, data_left);
		*p_ss_padding = 0;
		*p_auth_len = 0;
		*p_frag_len = DCERPC_REQUEST_LENGTH + data_len;
		return data_len;

	case DCERPC_AUTH_LEVEL_INTEGRITY:
	case DCERPC_AUTH_LEVEL_PRIVACY:
		/* Treat the same for all authenticated rpc requests. */
		switch(cli->auth->auth_type) {
		case DCERPC_AUTH_TYPE_SPNEGO:
			switch (cli->auth->spnego_type) {
			case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
				*p_auth_len = NTLMSSP_SIG_SIZE;
				break;
			default:
				smb_panic("bad auth type");
				break;
			}
			/* BUGFIX: this break was missing, so the SPNEGO
			 * case fell through into DCERPC_AUTH_TYPE_NTLMSSP.
			 * Harmless only because both assign the same
			 * signature size. */
			break;
		case DCERPC_AUTH_TYPE_NTLMSSP:
			*p_auth_len = NTLMSSP_SIG_SIZE;
			break;
		case DCERPC_AUTH_TYPE_SCHANNEL:
			*p_auth_len = NL_AUTH_SIGNATURE_SIZE;
			break;
		default:
			smb_panic("bad auth type");
			break;
		}

		data_space = cli->max_xmit_frag
			- DCERPC_REQUEST_LENGTH
			- DCERPC_AUTH_TRAILER_LENGTH
			- *p_auth_len;

		data_len = MIN(data_space, data_left);

		/* Pad the payload up to the NDR alignment boundary so the
		 * auth footer starts aligned. */
		*p_ss_padding = 0;
		if (data_len % CLIENT_NDR_PADDING_SIZE) {
			*p_ss_padding = CLIENT_NDR_PADDING_SIZE - (data_len % CLIENT_NDR_PADDING_SIZE);
		}
		*p_frag_len = DCERPC_REQUEST_LENGTH
				+ data_len + *p_ss_padding
				+ DCERPC_AUTH_TRAILER_LENGTH
				+ *p_auth_len;
		return data_len;

	default:
		smb_panic("bad auth level");
		/* Notreached. */
		return 0;
	}
}
/*******************************************************************
External interface.
Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
and deals with signing/sealing details.
********************************************************************/
2009-01-17 19:52:35 +03:00
/* State for the async multi-fragment RPC request machinery. */
struct rpc_api_pipe_req_state {
	/* Event context driving the async request. */
	struct event_context *ev;
	/* Pipe the request is sent on. */
	struct rpc_pipe_client *cli;
	/* Operation number of the RPC call. */
	uint8_t op_num;
	/* Call id shared by all fragments of this request. */
	uint32_t call_id;
	/* Complete marshalled request payload (caller-owned). */
	DATA_BLOB *req_data;
	/* How many bytes of req_data have been sent so far. */
	uint32_t req_data_sent;
	/* Outgoing fragment currently being built/sent. */
	DATA_BLOB rpc_out;
	/* Assembled response payload. */
	DATA_BLOB reply_pdu;
};
2009-03-23 23:49:19 +03:00
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2009-03-24 00:33:00 +03:00
static void rpc_api_pipe_req_done(struct tevent_req *subreq);
2009-01-17 19:52:35 +03:00
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
bool *is_last_frag);
2009-03-24 00:49:29 +03:00
/*
 * Kick off an async RPC request on an open pipe. Splits req_data into
 * fragments sized to the server's max_xmit_frag; the last (or only)
 * fragment is sent via rpc_api_pipe_send() which also collects the
 * response, earlier fragments are just written out.
 */
struct tevent_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct rpc_pipe_client *cli,
					 uint8_t op_num,
					 DATA_BLOB *req_data)
{
	struct tevent_req *req, *subreq;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	req = tevent_req_create(mem_ctx, &state,
				struct rpc_api_pipe_req_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();
	state->reply_pdu = data_blob_null;
	state->rpc_out = data_blob_null;

	/* Sanity-check the server's advertised fragment size. */
	if (cli->max_xmit_frag < DCERPC_REQUEST_LENGTH + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		/* Single-fragment request: send and await the response. */
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->rpc_out,
					   DCERPC_PKT_RESPONSE);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		/* More fragments follow: just write this one out. */
		subreq = rpc_write_send(state, ev, cli->transport,
					state->rpc_out.data,
					state->rpc_out.length);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
/*
 * Build the next request fragment in state->rpc_out: header, payload
 * slice, then the sign/seal auth footer. Sets *is_last_frag when this
 * fragment carries the final bytes of the request.
 */
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	uint32_t thistime;
	uint32_t remaining;
	uint32_t ss_padding;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	union dcerpc_payload u;
	NTSTATUS status;

	remaining = state->req_data->length - state->req_data_sent;

	thistime = calculate_data_len_tosend(
		state->cli, remaining, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = DCERPC_PFC_FLAG_FIRST;
	}
	if (thistime == remaining) {
		flags |= DCERPC_PFC_FLAG_LAST;
	}

	data_blob_free(&state->rpc_out);

	ZERO_STRUCT(u.request);

	u.request.alloc_hint = state->req_data->length;
	u.request.context_id = 0;
	u.request.opnum = state->op_num;

	status = dcerpc_push_ncacn_packet(state,
					  DCERPC_PKT_REQUEST,
					  flags,
					  auth_len,
					  state->call_id,
					  &u,
					  &state->rpc_out);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* explicitly set frag_len here as dcerpc_push_ncacn_packet() can't
	 * compute it right for requests because the auth trailer is missing
	 * at this stage */
	dcerpc_set_frag_length(&state->rpc_out, frag_len);

	/* Append this fragment's share of the payload. */
	if (!data_blob_append(NULL, &state->rpc_out,
			      state->req_data->data + state->req_data_sent,
			      thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Sign/seal the fragment as negotiated for this pipe. */
	status = dcerpc_add_auth_footer(state->cli->auth, ss_padding,
					&state->rpc_out);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	state->req_data_sent += thistime;
	*is_last_frag = ((flags & DCERPC_PFC_FLAG_LAST) != 0);

	return status;
}
2009-03-23 23:49:19 +03:00
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
2009-01-17 19:52:35 +03:00
{
2009-03-24 00:49:29 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct rpc_api_pipe_req_state *state = tevent_req_data(
req, struct rpc_api_pipe_req_state);
2009-01-17 19:52:35 +03:00
NTSTATUS status;
bool is_last_frag;
status = rpc_write_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 00:49:29 +03:00
tevent_req_nterror(req, status);
2009-01-17 19:52:35 +03:00
return;
}
status = prepare_next_frag(state, &is_last_frag);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 00:49:29 +03:00
tevent_req_nterror(req, status);
2009-01-17 19:52:35 +03:00
return;
}
if (is_last_frag) {
2009-03-24 00:33:00 +03:00
subreq = rpc_api_pipe_send(state, state->ev, state->cli,
&state->rpc_out,
DCERPC_PKT_RESPONSE);
2009-03-24 00:49:29 +03:00
if (tevent_req_nomem(subreq, req)) {
2009-01-17 19:52:35 +03:00
return;
}
2009-03-24 00:33:00 +03:00
tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
2009-01-17 19:52:35 +03:00
} else {
subreq = rpc_write_send(state, state->ev,
state->cli->transport,
state->rpc_out.data,
state->rpc_out.length);
2009-03-24 00:49:29 +03:00
if (tevent_req_nomem(subreq, req)) {
2009-01-17 19:52:35 +03:00
return;
}
2009-03-23 23:49:19 +03:00
tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
req);
2009-01-17 19:52:35 +03:00
}
}
2009-03-24 00:33:00 +03:00
static void rpc_api_pipe_req_done(struct tevent_req *subreq)
2009-01-17 19:52:35 +03:00
{
2009-03-24 00:49:29 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct rpc_api_pipe_req_state *state = tevent_req_data(
req, struct rpc_api_pipe_req_state);
2009-01-17 19:52:35 +03:00
NTSTATUS status;
status = rpc_api_pipe_recv(subreq, state, NULL, &state->reply_pdu);
2009-01-17 19:52:35 +03:00
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 00:49:29 +03:00
tevent_req_nterror(req, status);
2009-01-17 19:52:35 +03:00
return;
}
2009-03-24 00:49:29 +03:00
tevent_req_done(req);
2009-01-17 19:52:35 +03:00
}
2009-03-24 00:49:29 +03:00
/*
 * Receive side of rpc_api_pipe_req_send(). Always initializes
 * *reply_pdu (the rpccli_* callers rely on that); on success the
 * payload's memory is moved onto mem_ctx.
 */
NTSTATUS rpc_api_pipe_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
			       DATA_BLOB *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		/*
		 * We always have to initialize to reply pdu, even if there is
		 * none. The rpccli_* caller routines expect this.
		 */
		*reply_pdu = data_blob_null;
		return status;
	}

	/* Hand the assembled response (and its memory) to the caller. */
	reply_pdu->data = talloc_move(mem_ctx, &state->reply_pdu.data);
	reply_pdu->length = state->reply_pdu.length;
	state->reply_pdu.length = 0;

	return NT_STATUS_OK;
}
#if 0
/* NOTE(review): dead code, compiled out. Kept for reference only;
 * consider deleting in a cleanup pass. */
/****************************************************************************
 Set the handle state.
****************************************************************************/
static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
				   const char *pipe_name, uint16 device_state)
{
	bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;
	if (pipe_name == NULL)
		return False;
	DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
		 cli->fnum, pipe_name, device_state));
	/* create parameters: device state */
	SSVAL(param, 0, device_state);
	/* create setup parameters. */
	setup[0] = 0x0001;
	setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
	/* send the data on \PIPE\ */
	if (cli_api_pipe(cli->cli, "\\PIPE\\",
	            setup, 2, 0,                     /* setup, length, max */
	            param, 2, 0,                     /* param, length, max */
	            NULL, 0, 1024,                   /* data, length, max */
	            &rparam, &rparam_len,        /* return param, length */
	            &rdata, &rdata_len))         /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}
	SAFE_FREE(rparam);
	SAFE_FREE(rdata);
	return state_set;
}
#endif
/****************************************************************************
Check the rpc bind acknowledge response.
****************************************************************************/
/*
 * Validate a BIND_ACK from the server: the single presentation
 * context we offered must have been accepted with the transfer
 * syntax we asked for.
 *
 * Returns true only when the bind was accepted.
 */
static bool check_bind_response(const struct dcerpc_bind_ack *r,
				const struct ndr_syntax_id *transfer)
{
	struct dcerpc_ack_ctx ctx;

	if (r->secondary_address_size == 0) {
		/* Advisory only; some servers leave the pipe name empty. */
		DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
	}

	if (r->num_results < 1 || !r->ctx_list) {
		return false;
	}

	ctx = r->ctx_list[0];

	/* check the transfer syntax */
	if ((ctx.syntax.if_version != transfer->if_version) ||
	     (memcmp(&ctx.syntax.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
		DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
		return False;
	}

	if (r->num_results != 0x1 || ctx.result != 0) {
		DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
		          r->num_results, ctx.reason));
		/* BUGFIX: previously this only logged the denial and then
		 * fell through to "accepted!". A rejected context must
		 * fail the bind. */
		return false;
	}

	DEBUG(5,("check_bind_response: accepted!\n"));
	return True;
}
/*******************************************************************
Creates a DCE/RPC bind authentication response.
This is the packet that is sent back to the server once we
have received a BIND-ACK, to finish the third leg of
the authentication handshake.
********************************************************************/
/*
 * Marshall an AUTH3 PDU into *rpc_out. This carries the final leg of
 * the auth handshake (e.g. NTLMSSP Authenticate) after we received
 * the server's BIND_ACK. The cli argument is unused but kept for
 * signature compatibility with the other create_* helpers.
 */
static NTSTATUS create_rpc_bind_auth3(TALLOC_CTX *mem_ctx,
				struct rpc_pipe_client *cli,
				uint32 rpc_call_id,
				enum dcerpc_AuthType auth_type,
				enum dcerpc_AuthLevel auth_level,
				DATA_BLOB *pauth_blob,
				DATA_BLOB *rpc_out)
{
	union dcerpc_payload u;
	NTSTATUS status;

	u.auth3._pad = 0;

	/* Wrap the mechanism token in a dcerpc_auth trailer. */
	status = dcerpc_push_dcerpc_auth(mem_ctx,
					 auth_type,
					 auth_level,
					 0, /* auth_pad_length */
					 1, /* auth_context_id */
					 pauth_blob,
					 &u.auth3.auth_info);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	status = dcerpc_push_ncacn_packet(mem_ctx,
					  DCERPC_PKT_AUTH3,
					  DCERPC_PFC_FLAG_FIRST |
					  DCERPC_PFC_FLAG_LAST,
					  pauth_blob->length,
					  rpc_call_id,
					  &u,
					  rpc_out);
	data_blob_free(&u.auth3.auth_info);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
		return status;
	}

	return NT_STATUS_OK;
}
/*******************************************************************
Creates a DCE/RPC bind alter context authentication request which
may contain a spnego auth blobl
********************************************************************/
Jeremy requested that I get my NTLMSSP patch into CVS. He didn't request the schannel code, but I've included that anyway. :-) This patch revives the client-side NTLMSSP support for RPC named pipes in Samba, and cleans up the client and server schannel code. The use of the new code is enabled by the 'sign', 'seal' and 'schannel' commands in rpcclient. The aim was to prove that our separate NTLMSSP client library actually implements NTLMSSP signing and sealing as per Microsoft's NTLMv1 implementation, in the hope that knowing this will assist us in correctly implementing NTLMSSP signing for SMB packets. (Still not yet functional) This patch replaces the NTLMSSP implementation in rpc_client/cli_pipe.c with calls to libsmb/ntlmssp.c. In the process, we have gained the ability to use the more secure NT password, and the ability to sign-only, instead of having to seal the pipe connection. (Previously we were limited to sealing, and could only use the LM-password derived key). Our new client-side NTLMSSP code also needed alteration to cope with our comparatively simple server-side implementation. A future step is to replace it with calls to the same NTLMSSP library. Also included in this patch is the schannel 'sign only' patch I submitted to the team earlier. While not enabled (and not functional, at this stage) the work in this patch makes the code paths *much* easier to follow. I have also included similar hooks in rpccleint to allow the use of schannel on *any* pipe. rpcclient now defaults to not using schannel (or any other extra per-pipe authenticiation) for any connection. The 'schannel' command enables schannel for all pipes until disabled. This code is also much more secure than the previous code, as changes to our cli_pipe routines ensure that the authentication footer cannot be removed by an attacker, and more error states are correctly handled. 
(The same needs to be done to our server) Andrew Bartlett (This used to be commit 5472ddc9eaf4e79c5b2e1c8ee8c7f190dc285f19)
2003-07-14 12:46:32 +04:00
/*
 * Marshall an ALTER_CONTEXT PDU carrying a SPNEGO auth blob for the
 * continuing legs of a SPNEGO handshake.
 */
static NTSTATUS create_rpc_alter_context(TALLOC_CTX *mem_ctx,
					uint32 rpc_call_id,
					const struct ndr_syntax_id *abstract,
					const struct ndr_syntax_id *transfer,
					enum dcerpc_AuthLevel auth_level,
					const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
					DATA_BLOB *rpc_out)
{
	DATA_BLOB auth_info;
	NTSTATUS status;

	/* SPNEGO is the only mechanism that alters context here. */
	status = dcerpc_push_dcerpc_auth(mem_ctx,
					 DCERPC_AUTH_TYPE_SPNEGO,
					 auth_level,
					 0, /* auth_pad_length */
					 1, /* auth_context_id */
					 pauth_blob,
					 &auth_info);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	status = create_bind_or_alt_ctx_internal(mem_ctx,
						 DCERPC_PKT_ALTER,
						 rpc_call_id,
						 abstract,
						 transfer,
						 &auth_info,
						 rpc_out);
	data_blob_free(&auth_info);
	return status;
}
/****************************************************************************
Do an rpc bind.
****************************************************************************/
2009-01-18 14:12:15 +03:00
struct rpc_pipe_bind_state {
struct event_context *ev;
struct rpc_pipe_client *cli;
DATA_BLOB rpc_out;
2009-01-18 14:12:15 +03:00
uint32_t rpc_call_id;
};
2009-03-24 00:33:00 +03:00
static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq);
2009-03-24 01:38:04 +03:00
static NTSTATUS rpc_finish_auth3_bind_send(struct tevent_req *req,
2009-01-18 14:12:15 +03:00
struct rpc_pipe_bind_state *state,
struct ncacn_packet *r);
2009-03-23 23:49:19 +03:00
static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2009-03-24 01:38:04 +03:00
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct tevent_req *req,
2009-01-18 14:12:15 +03:00
struct rpc_pipe_bind_state *state,
struct ncacn_packet *r,
DATA_BLOB *reply_pdu);
2009-03-24 00:33:00 +03:00
static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq);
2009-01-18 14:12:15 +03:00
2009-03-24 01:38:04 +03:00
/*
 * Start an async bind on the given pipe. Takes ownership of 'auth'
 * (talloc_move'd onto cli), marshalls the bind PDU and sends it,
 * expecting a BIND_ACK as the first reply.
 */
struct tevent_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct rpc_pipe_client *cli,
				      struct pipe_auth_data *auth)
{
	struct tevent_req *req, *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct rpc_pipe_bind_state);
	if (req == NULL) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u(%u), auth_level %u\n",
		rpccli_pipe_txt(talloc_tos(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->spnego_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();
	state->rpc_out = data_blob_null;

	/* The pipe now owns the auth data. */
	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(state, cli,
				     cli->auth,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     &state->rpc_out);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   DCERPC_PKT_BIND_ACK);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_pipe_bind_step_one_done, req);
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
2009-03-24 00:33:00 +03:00
static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq)
2009-01-18 14:12:15 +03:00
{
2009-03-24 01:38:04 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct rpc_pipe_bind_state *state = tevent_req_data(
req, struct rpc_pipe_bind_state);
DATA_BLOB reply_pdu;
struct ncacn_packet *pkt;
2009-01-18 14:12:15 +03:00
NTSTATUS status;
status = rpc_api_pipe_recv(subreq, talloc_tos(), &pkt, &reply_pdu);
2009-01-18 14:12:15 +03:00
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
rpccli_pipe_txt(talloc_tos(), state->cli),
2009-01-18 14:12:15 +03:00
nt_errstr(status)));
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, status);
2009-01-18 14:12:15 +03:00
return;
}
if (!check_bind_response(&pkt->u.bind_ack, &state->cli->transfer_syntax)) {
2009-01-18 14:12:15 +03:00
DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2009-01-18 14:12:15 +03:00
return;
}
state->cli->max_xmit_frag = pkt->u.bind_ack.max_xmit_frag;
state->cli->max_recv_frag = pkt->u.bind_ack.max_recv_frag;
2009-01-18 14:12:15 +03:00
/*
* For authenticated binds we may need to do 3 or 4 leg binds.
*/
switch(state->cli->auth->auth_type) {
case DCERPC_AUTH_TYPE_NONE:
case DCERPC_AUTH_TYPE_SCHANNEL:
2009-01-18 14:12:15 +03:00
/* Bind complete. */
2009-03-24 01:38:04 +03:00
tevent_req_done(req);
return;
2009-01-18 14:12:15 +03:00
case DCERPC_AUTH_TYPE_NTLMSSP:
2009-01-18 14:12:15 +03:00
/* Need to send AUTH3 packet - no reply. */
status = rpc_finish_auth3_bind_send(req, state, pkt);
2009-01-18 14:12:15 +03:00
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, status);
2009-01-18 14:12:15 +03:00
}
return;
2009-01-18 14:12:15 +03:00
case DCERPC_AUTH_TYPE_SPNEGO:
if (state->cli->auth->spnego_type !=
PIPE_AUTH_TYPE_SPNEGO_NTLMSSP) {
break;
}
2009-01-18 14:12:15 +03:00
/* Need to send alter context request and reply. */
status = rpc_finish_spnego_ntlmssp_bind_send(req, state, pkt,
2009-01-18 14:12:15 +03:00
&reply_pdu);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, status);
2009-01-18 14:12:15 +03:00
}
return;
2009-01-18 14:12:15 +03:00
case DCERPC_AUTH_TYPE_KRB5:
2009-01-18 14:12:15 +03:00
/* */
break;
2009-01-18 14:12:15 +03:00
default:
break;
2009-01-18 14:12:15 +03:00
}
DEBUG(0,("cli_finish_bind_auth: unknown auth type %u(%u)\n",
(unsigned int)state->cli->auth->auth_type,
(unsigned int)state->cli->auth->spnego_type));
tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2009-01-18 14:12:15 +03:00
}
2009-03-24 01:38:04 +03:00
static NTSTATUS rpc_finish_auth3_bind_send(struct tevent_req *req,
2009-01-18 14:12:15 +03:00
struct rpc_pipe_bind_state *state,
struct ncacn_packet *r)
2009-01-18 14:12:15 +03:00
{
DATA_BLOB client_reply = data_blob_null;
struct dcerpc_auth auth;
2009-03-23 23:49:19 +03:00
struct tevent_req *subreq;
2009-01-18 14:12:15 +03:00
NTSTATUS status;
if ((r->auth_length == 0)
|| (r->frag_length < DCERPC_AUTH_TRAILER_LENGTH
+ r->auth_length)) {
2009-01-18 14:12:15 +03:00
return NT_STATUS_INVALID_PARAMETER;
}
status = dcerpc_pull_dcerpc_auth(talloc_tos(),
&r->u.bind_ack.auth_info,
&auth, false);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("Failed to pull dcerpc auth: %s.\n",
nt_errstr(status)));
return status;
2009-01-18 14:12:15 +03:00
}
/* TODO - check auth_type/auth_level match. */
status = auth_ntlmssp_update(state->cli->auth->a_u.auth_ntlmssp_state,
auth.credentials, &client_reply);
2009-01-18 14:12:15 +03:00
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
"blob failed: %s.\n", nt_errstr(status)));
return status;
}
data_blob_free(&state->rpc_out);
2009-01-18 14:12:15 +03:00
status = create_rpc_bind_auth3(state,
state->cli, state->rpc_call_id,
2009-01-18 14:12:15 +03:00
state->cli->auth->auth_type,
state->cli->auth->auth_level,
&client_reply, &state->rpc_out);
data_blob_free(&client_reply);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
subreq = rpc_write_send(state, state->ev, state->cli->transport,
state->rpc_out.data, state->rpc_out.length);
2009-01-18 14:12:15 +03:00
if (subreq == NULL) {
return NT_STATUS_NO_MEMORY;
}
2009-03-23 23:49:19 +03:00
tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2009-01-18 14:12:15 +03:00
return NT_STATUS_OK;
}
2009-03-23 23:49:19 +03:00
static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2009-01-18 14:12:15 +03:00
{
2009-03-24 01:38:04 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
2009-01-18 14:12:15 +03:00
NTSTATUS status;
status = rpc_write_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, status);
2009-01-18 14:12:15 +03:00
return;
}
2009-03-24 01:38:04 +03:00
tevent_req_done(req);
2009-01-18 14:12:15 +03:00
}
2009-03-24 01:38:04 +03:00
/*
 * Continue a SPNEGO/NTLMSSP bind: unwrap the server's SPNEGO challenge
 * from the bind_ack auth trailer, feed it to NTLMSSP, SPNEGO-wrap the
 * client answer and send it in an alter-context request. The alter
 * context response is handled in rpc_bind_ntlmssp_api_done.
 */
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct tevent_req *req,
					struct rpc_pipe_bind_state *state,
					struct ncacn_packet *r,
					DATA_BLOB *reply_pdu)
{
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	struct dcerpc_auth auth_info;
	DATA_BLOB auth_blob;
	struct tevent_req *subreq;
	NTSTATUS status;

	/* An auth trailer must be present and must fit in the fragment. */
	if ((r->auth_length == 0)
	    || (r->frag_length < DCERPC_AUTH_TRAILER_LENGTH
					+ r->auth_length)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	auth_blob = data_blob_const(reply_pdu->data
					+ r->frag_length
					- DCERPC_AUTH_TRAILER_LENGTH
					- r->auth_length,
				    DCERPC_AUTH_TRAILER_LENGTH
					+ r->auth_length);

	status = dcerpc_pull_dcerpc_auth(state, &auth_blob, &auth_info, false);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("Failed to unmarshall dcerpc_auth.\n"));
		return status;
	}

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(state, auth_info.credentials,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&tmp_blob);

	status = auth_ntlmssp_update(state->cli->auth->a_u.auth_ntlmssp_state,
				     server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(state, client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	data_blob_free(&state->rpc_out);

	status = create_rpc_alter_context(state,
					  state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, DCERPC_PKT_ALTER_RESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, rpc_bind_ntlmssp_api_done, req);
	return NT_STATUS_OK;
}
2009-03-24 00:33:00 +03:00
static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq)
2009-01-18 14:12:15 +03:00
{
2009-03-24 01:38:04 +03:00
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct rpc_pipe_bind_state *state = tevent_req_data(
req, struct rpc_pipe_bind_state);
2009-01-18 14:12:15 +03:00
DATA_BLOB tmp_blob = data_blob_null;
struct ncacn_packet *pkt;
struct dcerpc_auth auth;
2009-01-18 14:12:15 +03:00
NTSTATUS status;
status = rpc_api_pipe_recv(subreq, talloc_tos(), &pkt, NULL);
2009-01-18 14:12:15 +03:00
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, status);
2009-01-18 14:12:15 +03:00
return;
}
status = dcerpc_pull_dcerpc_auth(pkt,
&pkt->u.alter_resp.auth_info,
&auth, false);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
2009-01-18 14:12:15 +03:00
return;
}
/* Check we got a valid auth response. */
if (!spnego_parse_auth_response(talloc_tos(), auth.credentials,
NT_STATUS_OK,
2009-01-18 14:12:15 +03:00
OID_NTLMSSP, &tmp_blob)) {
data_blob_free(&tmp_blob);
2009-03-24 01:38:04 +03:00
tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2009-01-18 14:12:15 +03:00
return;
}
data_blob_free(&tmp_blob);
DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
"%s.\n", rpccli_pipe_txt(talloc_tos(), state->cli)));
2009-03-24 01:38:04 +03:00
tevent_req_done(req);
2009-01-18 14:12:15 +03:00
}
2009-03-24 01:38:04 +03:00
/* Collect the result of an async rpc_pipe_bind_send. */
NTSTATUS rpc_pipe_bind_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
struct pipe_auth_data *auth)
2009-01-18 14:12:15 +03:00
{
TALLOC_CTX *frame = talloc_stackframe();
struct event_context *ev;
2009-03-24 01:38:04 +03:00
struct tevent_req *req;
2009-04-06 22:52:04 +04:00
NTSTATUS status = NT_STATUS_OK;
2009-01-18 14:12:15 +03:00
ev = event_context_init(frame);
if (ev == NULL) {
2009-04-06 22:52:04 +04:00
status = NT_STATUS_NO_MEMORY;
2009-01-18 14:12:15 +03:00
goto fail;
}
req = rpc_pipe_bind_send(frame, ev, cli, auth);
if (req == NULL) {
2009-04-06 22:52:04 +04:00
status = NT_STATUS_NO_MEMORY;
2009-01-18 14:12:15 +03:00
goto fail;
}
2009-04-06 22:52:04 +04:00
if (!tevent_req_poll(req, ev)) {
status = map_nt_error_from_unix(errno);
goto fail;
}
2009-01-18 14:12:15 +03:00
status = rpc_pipe_bind_recv(req);
fail:
TALLOC_FREE(frame);
return status;
}
#define RPCCLI_DEFAULT_TIMEOUT 10000 /* 10 seconds. */
unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
unsigned int timeout)
{
unsigned int old;
if (rpc_cli->transport == NULL) {
return RPCCLI_DEFAULT_TIMEOUT;
}
if (rpc_cli->transport->set_timeout == NULL) {
return RPCCLI_DEFAULT_TIMEOUT;
}
old = rpc_cli->transport->set_timeout(rpc_cli->transport->priv, timeout);
if (old == 0) {
return RPCCLI_DEFAULT_TIMEOUT;
}
return old;
}
/* Report whether the pipe's underlying transport is still connected. */
bool rpccli_is_connected(struct rpc_pipe_client *rpc_cli)
{
	if ((rpc_cli == NULL) || (rpc_cli->transport == NULL)) {
		return false;
	}
	return rpc_cli->transport->is_connected(rpc_cli->transport->priv);
}
/*
 * Fetch the NT password hash backing this pipe's credentials.
 * For NTLMSSP (raw or SPNEGO-wrapped) binds the hash comes from the
 * NTLMSSP auth state; otherwise it is derived from the password of
 * the underlying SMB connection, if there is one.
 */
bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
{
	struct cli_state *cli;

	if ((rpc_cli->auth->auth_type == DCERPC_AUTH_TYPE_NTLMSSP)
	    || ((rpc_cli->auth->auth_type == DCERPC_AUTH_TYPE_SPNEGO
		 && rpc_cli->auth->spnego_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP))) {
		memcpy(nt_hash, auth_ntlmssp_get_nt_hash(rpc_cli->auth->a_u.auth_ntlmssp_state), 16);
		return true;
	}

	/* Not NTLMSSP - need the SMB connection's password. */
	cli = rpc_pipe_np_smb_conn(rpc_cli);
	if (cli == NULL) {
		return false;
	}
	/* A NULL password is treated as the empty password. */
	E_md4hash(cli->password ? cli->password : "", nt_hash);
	return true;
}
/*
 * Build pipe_auth_data describing an anonymous (unauthenticated) bind:
 * auth type/level NONE with empty user and domain strings.
 */
NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
			       struct pipe_auth_data **presult)
{
	struct pipe_auth_data *auth;

	auth = talloc(mem_ctx, struct pipe_auth_data);
	if (auth == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	auth->auth_type = DCERPC_AUTH_TYPE_NONE;
	auth->spnego_type = PIPE_AUTH_TYPE_SPNEGO_NONE;
	auth->auth_level = DCERPC_AUTH_LEVEL_NONE;

	auth->user_name = talloc_strdup(auth, "");
	auth->domain = talloc_strdup(auth, "");
	if ((auth->user_name == NULL) || (auth->domain == NULL)) {
		TALLOC_FREE(auth);
		return NT_STATUS_NO_MEMORY;
	}

	*presult = auth;
	return NT_STATUS_OK;
}
/* talloc destructor: tear down the NTLMSSP state with its pipe_auth_data. */
static int cli_auth_ntlmssp_data_destructor(struct pipe_auth_data *auth)
{
	TALLOC_FREE(auth->a_u.auth_ntlmssp_state);
	return 0;
}
/*
 * Build pipe_auth_data for an NTLMSSP (or SPNEGO-wrapped NTLMSSP) bind.
 *
 * Starts an NTLMSSP client context, primes it with domain, username and
 * password, then masks the negotiate flags so only the sign/seal bits
 * matching the requested auth_level remain set. The result is talloc'ed
 * off mem_ctx; its destructor tears down the NTLMSSP state.
 */
static NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
					 enum dcerpc_AuthType auth_type,
					 enum pipe_auth_type_spnego spnego_type,
					 enum dcerpc_AuthLevel auth_level,
					 const char *domain,
					 const char *username,
					 const char *password,
					 struct pipe_auth_data **presult)
{
	struct pipe_auth_data *result;
	NTSTATUS status;

	result = talloc(mem_ctx, struct pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = auth_type;
	result->spnego_type = spnego_type;
	result->auth_level = auth_level;
	result->user_name = talloc_strdup(result, username);
	result->domain = talloc_strdup(result, domain);
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	status = auth_ntlmssp_client_start(NULL,
					   global_myname(),
					   lp_workgroup(),
					   lp_client_ntlmv2_auth(),
					   &result->a_u.auth_ntlmssp_state);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/* Frees the NTLMSSP context whenever 'result' goes away. */
	talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);

	status = auth_ntlmssp_set_username(result->a_u.auth_ntlmssp_state,
					   username);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = auth_ntlmssp_set_domain(result->a_u.auth_ntlmssp_state,
					 domain);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = auth_ntlmssp_set_password(result->a_u.auth_ntlmssp_state,
					   password);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/*
	 * Turn off sign+seal to allow selected auth level to turn it back on.
	 */
	auth_ntlmssp_and_flags(result->a_u.auth_ntlmssp_state,
			       ~(NTLMSSP_NEGOTIATE_SIGN |
				 NTLMSSP_NEGOTIATE_SEAL));

	if (auth_level == DCERPC_AUTH_LEVEL_INTEGRITY) {
		auth_ntlmssp_or_flags(result->a_u.auth_ntlmssp_state,
				      NTLMSSP_NEGOTIATE_SIGN);
	} else if (auth_level == DCERPC_AUTH_LEVEL_PRIVACY) {
		auth_ntlmssp_or_flags(result->a_u.auth_ntlmssp_state,
				      NTLMSSP_NEGOTIATE_SEAL |
				      NTLMSSP_NEGOTIATE_SIGN);
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return status;
}
/*
 * Build pipe_auth_data for a schannel bind: a fresh schannel_state in
 * the START state, acting as initiator, wrapping the given netlogon
 * credentials. Only allocation can fail here.
 */
NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
				   enum dcerpc_AuthLevel auth_level,
				   struct netlogon_creds_CredentialState *creds,
				   struct pipe_auth_data **presult)
{
	struct pipe_auth_data *auth;
	struct schannel_state *schannel_auth;

	auth = talloc(mem_ctx, struct pipe_auth_data);
	if (auth == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	auth->auth_type = DCERPC_AUTH_TYPE_SCHANNEL;
	auth->spnego_type = PIPE_AUTH_TYPE_SPNEGO_NONE;
	auth->auth_level = auth_level;

	auth->user_name = talloc_strdup(auth, "");
	auth->domain = talloc_strdup(auth, domain);
	if ((auth->user_name == NULL) || (auth->domain == NULL)) {
		goto fail;
	}

	schannel_auth = talloc(auth, struct schannel_state);
	if (schannel_auth == NULL) {
		goto fail;
	}
	schannel_auth->state = SCHANNEL_STATE_START;
	schannel_auth->seq_num = 0;
	schannel_auth->initiator = true;
	schannel_auth->creds = creds;
	auth->a_u.schannel_auth = schannel_auth;

	*presult = auth;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(auth);
	return NT_STATUS_NO_MEMORY;
}
#ifdef HAVE_KRB5

/* talloc destructor: release the Kerberos session key blob. */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
	data_blob_free(&auth->session_key);
	return 0;
}

#endif
/*
 * Build pipe_auth_data for a Kerberos bind.
 *
 * If username and password are both given, a kinit is performed first
 * to obtain a TGT. Only compiled in with HAVE_KRB5; otherwise returns
 * NT_STATUS_NOT_SUPPORTED.
 */
static NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
					  enum dcerpc_AuthLevel auth_level,
					  const char *service_princ,
					  const char *username,
					  const char *password,
					  struct pipe_auth_data **presult)
{
#ifdef HAVE_KRB5
	struct pipe_auth_data *result;

	if ((username != NULL) && (password != NULL)) {
		/* Get a fresh TGT for these credentials. */
		int ret = kerberos_kinit_password(username, password, 0, NULL);
		if (ret != 0) {
			return NT_STATUS_ACCESS_DENIED;
		}
	}

	result = talloc(mem_ctx, struct pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = DCERPC_AUTH_TYPE_KRB5;
	result->spnego_type = PIPE_AUTH_TYPE_SPNEGO_NONE;
	result->auth_level = auth_level;

	/*
	 * Username / domain need fixing!
	 */
	result->user_name = talloc_strdup(result, "");
	result->domain = talloc_strdup(result, "");
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		goto fail;
	}

	result->a_u.kerberos_auth = TALLOC_ZERO_P(
		result, struct kerberos_auth_struct);
	if (result->a_u.kerberos_auth == NULL) {
		goto fail;
	}
	/* Make sure the session key is wiped when the struct goes away. */
	talloc_set_destructor(result->a_u.kerberos_auth,
			      cli_auth_kerberos_data_destructor);

	result->a_u.kerberos_auth->service_principal = talloc_strdup(
		result, service_princ);
	if (result->a_u.kerberos_auth->service_principal == NULL) {
		goto fail;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return NT_STATUS_NO_MEMORY;
#else
	return NT_STATUS_NOT_SUPPORTED;
#endif
}
/**
 * Create an rpc pipe client struct, connecting to a tcp port.
 *
 * Resolves the host name, connects (60s timeout) and attaches a socket
 * transport. On any failure the partially built client is freed.
 */
static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
				       uint16_t port,
				       const struct ndr_syntax_id *abstract_syntax,
				       struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	struct sockaddr_storage addr;
	NTSTATUS status;
	int fd;

	result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->abstract_syntax = *abstract_syntax;
	result->transfer_syntax = ndr_transfer_syntax;
	result->dispatch = cli_do_rpc_ndr;
	result->dispatch_send = cli_do_rpc_ndr_send;
	result->dispatch_recv = cli_do_rpc_ndr_recv;

	result->desthost = talloc_strdup(result, host);
	result->srv_name_slash = talloc_asprintf_strupper_m(
		result, "\\\\%s", result->desthost);
	if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;

	if (!resolve_name(host, &addr, 0, false)) {
		status = NT_STATUS_NOT_FOUND;
		goto fail;
	}

	status = open_socket_out(&addr, port, 60, &fd);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}
	set_socket_options(fd, lp_socket_options());

	/* The transport takes ownership of fd on success. */
	status = rpc_transport_sock_init(result, fd, &result->transport);
	if (!NT_STATUS_IS_OK(status)) {
		close(fd);
		goto fail;
	}

	result->transport->transport = NCACN_IP_TCP;

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return status;
}
/**
 * Determine the tcp port on which a dcerpc interface is listening
 * for the ncacn_ip_tcp transport via the endpoint mapper of the
 * target host.
 *
 * Opens an anonymous connection to the epmapper on port 135, builds a
 * protocol tower for the requested interface and asks epm_Map for a
 * matching binding, from which the port number is extracted.
 */
static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
				      const struct ndr_syntax_id *abstract_syntax,
				      uint16_t *pport)
{
	NTSTATUS status;
	struct rpc_pipe_client *epm_pipe = NULL;
	struct pipe_auth_data *auth = NULL;
	struct dcerpc_binding *map_binding = NULL;
	struct dcerpc_binding *res_binding = NULL;
	struct epm_twr_t *map_tower = NULL;
	struct epm_twr_t *res_towers = NULL;
	struct policy_handle *entry_handle = NULL;
	uint32_t num_towers = 0;
	uint32_t max_towers = 1;
	struct epm_twr_p_t towers;
	TALLOC_CTX *tmp_ctx = talloc_stackframe();

	if (pport == NULL) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto done;
	}

	/* open the connection to the endpoint mapper */
	status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
					&ndr_table_epmapper.syntax_id,
					&epm_pipe);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	status = rpccli_anon_bind_data(tmp_ctx, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	status = rpc_pipe_bind(epm_pipe, auth);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* create tower for asking the epmapper */
	map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
	if (map_binding == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	map_binding->transport = NCACN_IP_TCP;
	map_binding->object = *abstract_syntax;
	map_binding->host = host; /* needed? */
	map_binding->endpoint = "0"; /* correct? needed? */

	map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
	if (map_tower == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
					    &(map_tower->tower));
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* allocate further parameters for the epm_Map call */

	res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
	if (res_towers == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	towers.twr = res_towers;

	entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
	if (entry_handle == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	/* ask the endpoint mapper for the port */

	status = rpccli_epm_Map(epm_pipe,
				tmp_ctx,
				CONST_DISCARD(struct GUID *,
					      &(abstract_syntax->uuid)),
				map_tower,
				entry_handle,
				max_towers,
				&num_towers,
				&towers);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	if (num_towers != 1) {
		/* Expect exactly one matching endpoint. */
		status = NT_STATUS_UNSUCCESSFUL;
		goto done;
	}

	/* extract the port from the answer */

	status = dcerpc_binding_from_tower(tmp_ctx,
					   &(towers.twr->tower),
					   &res_binding);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* are further checks here necessary? */
	if (res_binding->transport != NCACN_IP_TCP) {
		status = NT_STATUS_UNSUCCESSFUL;
		goto done;
	}

	*pport = (uint16_t)atoi(res_binding->endpoint);

done:
	TALLOC_FREE(tmp_ctx);
	return status;
}
/**
 * Create a rpc pipe client struct, connecting to a host via tcp.
 * The port is determined by asking the endpoint mapper on the given
 * host.
 */
NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
			   const struct ndr_syntax_id *abstract_syntax,
			   struct rpc_pipe_client **presult)
{
	uint16_t port = 0;
	NTSTATUS status;

	/* First ask the endpoint mapper where the interface listens. */
	status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Then connect directly to the interface's own port. */
	return rpc_pipe_open_tcp_port(mem_ctx, host, port,
				      abstract_syntax, presult);
}
/********************************************************************
 Create a rpc pipe client struct, connecting to a unix domain socket
 ********************************************************************/
NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
			       const struct ndr_syntax_id *abstract_syntax,
			       struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	struct sockaddr_un addr;
	NTSTATUS status;
	int fd;

	result = talloc_zero(mem_ctx, struct rpc_pipe_client);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->abstract_syntax = *abstract_syntax;
	result->transfer_syntax = ndr_transfer_syntax;
	result->dispatch = cli_do_rpc_ndr;
	result->dispatch_send = cli_do_rpc_ndr_send;
	result->dispatch_recv = cli_do_rpc_ndr_recv;

	result->desthost = get_myname(result);
	result->srv_name_slash = talloc_asprintf_strupper_m(
		result, "\\\\%s", result->desthost);
	if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;

	/*
	 * Reject paths that do not fit into sun_path - the original
	 * strncpy could silently leave the path unterminated/truncated.
	 */
	if (strlen(socket_path) >= sizeof(addr.sun_path)) {
		status = NT_STATUS_NAME_TOO_LONG;
		goto fail;
	}

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd == -1) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	ZERO_STRUCT(addr);
	addr.sun_family = AF_UNIX;
	strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));

	if (sys_connect(fd, (struct sockaddr *)(void *)&addr) == -1) {
		/*
		 * Map errno before close() can clobber it, and free
		 * 'result' via the fail path (the old code leaked it).
		 */
		status = map_nt_error_from_unix(errno);
		DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
			  strerror(errno)));
		close(fd);
		goto fail;
	}

	status = rpc_transport_sock_init(result, fd, &result->transport);
	if (!NT_STATUS_IS_OK(status)) {
		close(fd);
		goto fail;
	}

	result->transport->transport = NCALRPC;

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return status;
}
/* Back-reference tying an rpc_pipe_client to its owning cli_state. */
struct rpc_pipe_client_np_ref {
	struct cli_state *cli;
	struct rpc_pipe_client *pipe;
};

/* talloc destructor: unlink the pipe from the cli's pipe_list. */
static int rpc_pipe_client_np_ref_destructor(struct rpc_pipe_client_np_ref *np_ref)
{
	DLIST_REMOVE(np_ref->cli->pipe_list, np_ref->pipe);
	return 0;
}
/****************************************************************************
 Open a named pipe over SMB to a remote server.
 *
 * CAVEAT CALLER OF THIS FUNCTION:
 *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
 *    so be sure that this function is called AFTER any structure (vs pointer)
 *    assignment of the cli.  In particular, libsmbclient does structure
 *    assignments of cli, which invalidates the data in the returned
 *    rpc_pipe_client if this function is called before the structure assignment
 *    of cli.
 *
 ****************************************************************************/

static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
				 const struct ndr_syntax_id *abstract_syntax,
				 struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	NTSTATUS status;
	struct rpc_pipe_client_np_ref *np_ref;

	/* sanity check to protect against crashes */

	if ( !cli ) {
		return NT_STATUS_INVALID_HANDLE;
	}

	result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->abstract_syntax = *abstract_syntax;
	result->transfer_syntax = ndr_transfer_syntax;
	result->dispatch = cli_do_rpc_ndr;
	result->dispatch_send = cli_do_rpc_ndr_send;
	result->dispatch_recv = cli_do_rpc_ndr_recv;
	result->desthost = talloc_strdup(result, cli->desthost);
	result->srv_name_slash = talloc_asprintf_strupper_m(
		result, "\\\\%s", result->desthost);

	result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;

	if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
		TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}

	status = rpc_transport_np_init(result, cli, abstract_syntax,
				       &result->transport);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(result);
		return status;
	}

	result->transport->transport = NCACN_NP;

	/*
	 * Register the pipe on the cli's pipe_list; the np_ref destructor
	 * removes it again when the transport is freed.
	 */
	np_ref = talloc(result->transport, struct rpc_pipe_client_np_ref);
	if (np_ref == NULL) {
		TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}
	np_ref->cli = cli;
	np_ref->pipe = result;

	DLIST_ADD(np_ref->cli->pipe_list, np_ref->pipe);
	talloc_set_destructor(np_ref, rpc_pipe_client_np_ref_destructor);

	*presult = result;
	return NT_STATUS_OK;
}
/****************************************************************************
 Open an rpc pipe over an in-process smbd connection and bind anonymously.
****************************************************************************/

NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
			     struct rpc_cli_smbd_conn *conn,
			     const struct ndr_syntax_id *syntax,
			     struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	struct pipe_auth_data *auth;
	NTSTATUS status;

	result = talloc(mem_ctx, struct rpc_pipe_client);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	result->abstract_syntax = *syntax;
	result->transfer_syntax = ndr_transfer_syntax;
	result->dispatch = cli_do_rpc_ndr;
	result->dispatch_send = cli_do_rpc_ndr_send;
	result->dispatch_recv = cli_do_rpc_ndr_recv;
	result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;

	result->desthost = talloc_strdup(result, global_myname());
	result->srv_name_slash = talloc_asprintf_strupper_m(
		result, "\\\\%s", global_myname());
	if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
		TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}

	status = rpc_transport_smbd_init(result, conn, syntax,
					 &result->transport);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
			  nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	/* Anonymous bind - no authentication over the local transport. */
	status = rpccli_anon_bind_data(result, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
			  nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	status = rpc_pipe_bind(result, auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	result->transport->transport = NCACN_INTERNAL;

	*presult = result;
	return NT_STATUS_OK;
}
/****************************************************************************
 Open a pipe to a remote server.
 ****************************************************************************/

static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
				  enum dcerpc_transport_t transport,
				  const struct ndr_syntax_id *interface,
				  struct rpc_pipe_client **presult)
{
	/* Dispatch on the requested transport type. */
	if (transport == NCACN_IP_TCP) {
		return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
					 presult);
	}
	if (transport == NCACN_NP) {
		return rpc_pipe_open_np(cli, interface, presult);
	}
	return NT_STATUS_NOT_IMPLEMENTED;
}
/****************************************************************************
 Open a named pipe to an SMB server and bind anonymously.
 ****************************************************************************/

NTSTATUS cli_rpc_pipe_open_noauth_transport(struct cli_state *cli,
					    enum dcerpc_transport_t transport,
					    const struct ndr_syntax_id *interface,
					    struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	struct pipe_auth_data *auth;
	NTSTATUS status;

	status = cli_rpc_pipe_open(cli, transport, interface, &result);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	status = rpccli_anon_bind_data(result, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
			  nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	/*
	 * This is a bit of an abstraction violation due to the fact that an
	 * anonymous bind on an authenticated SMB inherits the user/domain
	 * from the enclosing SMB creds
	 */

	TALLOC_FREE(auth->user_name);
	TALLOC_FREE(auth->domain);

	auth->user_name = talloc_strdup(auth, cli->user_name);
	auth->domain = talloc_strdup(auth, cli->domain);
	auth->user_session_key = data_blob_talloc(auth,
		cli->user_session_key.data,
		cli->user_session_key.length);

	if ((auth->user_name == NULL) || (auth->domain == NULL)) {
		TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}

	status = rpc_pipe_bind(result, auth);
	if (!NT_STATUS_IS_OK(status)) {
		int lvl = 0;
		if (ndr_syntax_id_equal(interface,
					&ndr_table_dssetup.syntax_id)) {
			/* non AD domains just don't have this pipe, avoid
			 * level 0 statement in that case - gd */
			lvl = 3;
		}
		DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
			    "%s failed with error %s\n",
			    get_pipe_name_from_syntax(talloc_tos(), interface),
			    nt_errstr(status) ));
		TALLOC_FREE(result);
		return status;
	}

	DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
		  "%s and bound anonymously.\n",
		  get_pipe_name_from_syntax(talloc_tos(), interface),
		  cli->desthost));

	*presult = result;
	return NT_STATUS_OK;
}
/****************************************************************************
 Convenience wrapper: anonymous bind over the SMB named pipe transport.
****************************************************************************/

NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
				  const struct ndr_syntax_id *interface,
				  struct rpc_pipe_client **presult)
{
	NTSTATUS status;

	/* Anonymous opens always go over NCACN_NP. */
	status = cli_rpc_pipe_open_noauth_transport(cli, NCACN_NP,
						    interface, presult);
	return status;
}
/****************************************************************************
 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
****************************************************************************/

static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
						   const struct ndr_syntax_id *interface,
						   enum dcerpc_transport_t transport,
						   bool use_spnego,
						   enum dcerpc_AuthLevel auth_level,
						   const char *domain,
						   const char *username,
						   const char *password,
						   struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result = NULL;
	struct pipe_auth_data *auth = NULL;
	enum dcerpc_AuthType auth_type;
	enum pipe_auth_type_spnego spnego_type;
	NTSTATUS status;

	status = cli_rpc_pipe_open(cli, transport, interface, &result);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Raw NTLMSSP by default; wrap it in SPNEGO when requested. */
	if (use_spnego) {
		auth_type = DCERPC_AUTH_TYPE_SPNEGO;
		spnego_type = PIPE_AUTH_TYPE_SPNEGO_NTLMSSP;
	} else {
		auth_type = DCERPC_AUTH_TYPE_NTLMSSP;
		spnego_type = PIPE_AUTH_TYPE_SPNEGO_NONE;
	}

	status = rpccli_ntlmssp_bind_data(result,
					  auth_type, spnego_type, auth_level,
					  domain, username, password,
					  &auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
			  nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	status = rpc_pipe_bind(result, auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
			nt_errstr(status) ));
		TALLOC_FREE(result);
		return status;
	}

	DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
		"machine %s and bound NTLMSSP as user %s\\%s.\n",
		  get_pipe_name_from_syntax(talloc_tos(), interface),
		  cli->desthost, domain, username ));

	*presult = result;
	return NT_STATUS_OK;
}
/****************************************************************************
 External interface.
 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
****************************************************************************/

NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
				   const struct ndr_syntax_id *interface,
				   enum dcerpc_transport_t transport,
				   enum dcerpc_AuthLevel auth_level,
				   const char *domain,
				   const char *username,
				   const char *password,
				   struct rpc_pipe_client **presult)
{
	/* Plain NTLMSSP: no SPNEGO wrapping (use_spnego == false). */
	NTSTATUS status;

	status = cli_rpc_pipe_open_ntlmssp_internal(cli,
						    interface,
						    transport,
						    false,
						    auth_level,
						    domain,
						    username,
						    password,
						    presult);
	return status;
}
/****************************************************************************
 External interface.
 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
****************************************************************************/

NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
					  const struct ndr_syntax_id *interface,
					  enum dcerpc_transport_t transport,
					  enum dcerpc_AuthLevel auth_level,
					  const char *domain,
					  const char *username,
					  const char *password,
					  struct rpc_pipe_client **presult)
{
	/* NTLMSSP negotiated inside SPNEGO (use_spnego == true). */
	NTSTATUS status;

	status = cli_rpc_pipe_open_ntlmssp_internal(cli,
						    interface,
						    transport,
						    true,
						    auth_level,
						    domain,
						    username,
						    password,
						    presult);
	return status;
}
/****************************************************************************
 Get the schannel session key out of an already opened netlogon pipe.
****************************************************************************/

static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
						struct cli_state *cli,
						const char *domain,
						uint32 *pneg_flags)
{
	enum netr_SchannelType sec_chan_type = 0;
	unsigned char machine_pwd[16];
	const char *machine_account;
	NTSTATUS status;

	/* Get the machine account credentials from secrets.tdb. */
	if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
			       &sec_chan_type))
	{
		/* Fixed DEBUG prefix: message previously claimed the wrong
		 * function name (get_schannel_session_key). */
		DEBUG(0, ("get_schannel_session_key_common: could not fetch "
			"trust account password for domain '%s'\n",
			domain));
		return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
	}

	/* Run the netlogon challenge/authenticate exchange to establish
	 * credentials; pneg_flags is updated with the server's offer. */
	status = rpccli_netlogon_setup_creds(netlogon_pipe,
					cli->desthost, /* server name */
					domain,	       /* domain */
					global_myname(), /* client name */
					machine_account, /* machine account name */
					machine_pwd,
					sec_chan_type,
					pneg_flags);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("get_schannel_session_key_common: "
			  "rpccli_netlogon_setup_creds failed with result %s "
			  "to server %s, domain %s, machine account %s.\n",
			  nt_errstr(status), cli->desthost, domain,
			  machine_account ));
		return status;
	}

	/* Without the schannel negotiate flag we cannot proceed. */
	if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
		DEBUG(3, ("get_schannel_session_key_common: Server %s did not "
			  "offer schannel\n", cli->desthost));
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	/* Fixed stray double semicolon (empty statement) on the return. */
	return NT_STATUS_OK;
}
/****************************************************************************
 Open a netlogon pipe and get the schannel session key.
 Now exposed to external callers.
****************************************************************************/

NTSTATUS get_schannel_session_key(struct cli_state *cli,
				  const char *domain,
				  uint32 *pneg_flags,
				  struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *netlogon_pipe = NULL;
	NTSTATUS status;

	/* Open an anonymous netlogon pipe, then negotiate credentials
	 * on it; any failure tears the pipe down again. */
	status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
					  &netlogon_pipe);
	if (NT_STATUS_IS_OK(status)) {
		status = get_schannel_session_key_common(netlogon_pipe, cli,
							 domain, pneg_flags);
	}

	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(netlogon_pipe);
		return status;
	}

	*presult = netlogon_pipe;
	return NT_STATUS_OK;
}
/****************************************************************************
External interface.
Open a named pipe to an SMB server and bind using schannel (bind type 68)
using session_key. sign and seal.
The *pdc will be stolen onto this new pipe
****************************************************************************/
NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
const struct ndr_syntax_id *interface,
enum dcerpc_transport_t transport,
enum dcerpc_AuthLevel auth_level,
const char *domain,
struct netlogon_creds_CredentialState **pdc,
struct rpc_pipe_client **presult)
{
struct rpc_pipe_client *result;
struct pipe_auth_data *auth;
NTSTATUS status;
status = cli_rpc_pipe_open(cli, transport, interface, &result);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
status = rpccli_schannel_bind_data(result, domain, auth_level,
*pdc, &auth);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
nt_errstr(status)));
TALLOC_FREE(result);
return status;
}
status = rpc_pipe_bind(result, auth);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
"cli_rpc_pipe_bind failed with error %s\n",
nt_errstr(status) ));
TALLOC_FREE(result);
return status;
}
/*
* The credentials on a new netlogon pipe are the ones we are passed
* in - reference them in
*/
result->dc = talloc_move(result, pdc);
DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
"for domain %s and bound using schannel.\n",
get_pipe_name_from_syntax(talloc_tos(), interface),
cli->desthost, domain ));
*presult = result;
return NT_STATUS_OK;
}
/****************************************************************************
 Open a named pipe to an SMB server and bind using schannel (bind type 68).
 Fetch the session key ourselves using a temporary netlogon pipe. This
 version uses an ntlmssp auth bound netlogon pipe to get the key.
****************************************************************************/

static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
						      const char *domain,
						      const char *username,
						      const char *password,
						      uint32 *pneg_flags,
						      struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *netlogon_pipe = NULL;
	NTSTATUS status;

	/* Like get_schannel_session_key(), but the temporary netlogon
	 * pipe is bound with SPNEGO/NTLMSSP privacy instead of anonymously. */
	status = cli_rpc_pipe_open_spnego_ntlmssp(
		cli, &ndr_table_netlogon.syntax_id, NCACN_NP,
		DCERPC_AUTH_LEVEL_PRIVACY,
		domain, username, password, &netlogon_pipe);
	if (NT_STATUS_IS_OK(status)) {
		status = get_schannel_session_key_common(netlogon_pipe, cli,
							 domain, pneg_flags);
	}

	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(netlogon_pipe);
		return status;
	}

	*presult = netlogon_pipe;
	return NT_STATUS_OK;
}
/****************************************************************************
Open a named pipe to an SMB server and bind using schannel (bind type 68).
Fetch the session key ourselves using a temporary netlogon pipe. This version
uses an ntlmssp bind to get the session key.
****************************************************************************/
NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
const struct ndr_syntax_id *interface,
enum dcerpc_transport_t transport,
enum dcerpc_AuthLevel auth_level,
const char *domain,
const char *username,
const char *password,
struct rpc_pipe_client **presult)
{
uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
struct rpc_pipe_client *netlogon_pipe = NULL;
struct rpc_pipe_client *result = NULL;
NTSTATUS status;
status = get_schannel_session_key_auth_ntlmssp(
cli, domain, username, password, &neg_flags, &netlogon_pipe);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
"key from server %s for domain %s.\n",
cli->desthost, domain ));
return status;
}
status = cli_rpc_pipe_open_schannel_with_key(
cli, interface, transport, auth_level, domain, &netlogon_pipe->dc,
&result);
/* Now we've bound using the session key we can close the netlog pipe. */
TALLOC_FREE(netlogon_pipe);
if (NT_STATUS_IS_OK(status)) {
*presult = result;
}
return status;
}
/****************************************************************************
Open a named pipe to an SMB server and bind using schannel (bind type 68).
Fetch the session key ourselves using a temporary netlogon pipe.
****************************************************************************/
NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
const struct ndr_syntax_id *interface,
enum dcerpc_transport_t transport,
enum dcerpc_AuthLevel auth_level,
const char *domain,
struct rpc_pipe_client **presult)
{
uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
struct rpc_pipe_client *netlogon_pipe = NULL;
struct rpc_pipe_client *result = NULL;
NTSTATUS status;
status = get_schannel_session_key(cli, domain, &neg_flags,
&netlogon_pipe);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
"key from server %s for domain %s.\n",
cli->desthost, domain ));
return status;
}
status = cli_rpc_pipe_open_schannel_with_key(
cli, interface, transport, auth_level, domain, &netlogon_pipe->dc,
&result);
/* Now we've bound using the session key we can close the netlog pipe. */
TALLOC_FREE(netlogon_pipe);
if (NT_STATUS_IS_OK(status)) {
*presult = result;
}
return status;
}
/****************************************************************************
 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
 The idea is this can be called with service_princ, username and password all
 NULL so long as the caller has a TGT.
 ****************************************************************************/

NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
				const struct ndr_syntax_id *interface,
				enum dcerpc_AuthLevel auth_level,
				const char *service_princ,
				const char *username,
				const char *password,
				struct rpc_pipe_client **presult)
{
#ifdef HAVE_KRB5
	struct rpc_pipe_client *result;
	struct pipe_auth_data *auth;
	NTSTATUS status;

	/* Kerberos over this interface always uses the SMB named pipe
	 * transport (NCACN_NP). */
	status = cli_rpc_pipe_open(cli, NCACN_NP, interface, &result);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Build the krb5 auth context; service_princ/username/password may
	 * all be NULL when the caller already holds a TGT (see header
	 * comment above). */
	status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
					   username, password, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
			  nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	/* Perform the DCERPC bind with the krb5 auth data; the pipe is
	 * destroyed on failure so no partial state leaks to the caller. */
	status = rpc_pipe_bind(result, auth);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
			  "with error %s\n", nt_errstr(status)));
		TALLOC_FREE(result);
		return status;
	}

	*presult = result;
	return NT_STATUS_OK;
#else
	/* Build lacks kerberos support: fail at runtime rather than link
	 * time so callers can degrade gracefully. */
	DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
	return NT_STATUS_NOT_IMPLEMENTED;
#endif
}
/****************************************************************************
 Fetch the session key for an established RPC connection, as a blob
 talloc'ed on mem_ctx. The key source depends on the pipe's auth type:
 schannel/krb5 credentials, the NTLMSSP state, or (for unauthenticated
 pipes) the SMB user session key inherited at bind time.
 Returns NT_STATUS_NO_USER_SESSION_KEY when the auth type has no key.
****************************************************************************/

NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
			     struct rpc_pipe_client *cli,
			     DATA_BLOB *session_key)
{
	struct pipe_auth_data *a;
	DATA_BLOB sk;

	/* Validate the arguments before touching cli: the old code read
	 * cli->auth prior to the NULL check, which is undefined behaviour
	 * for a NULL cli. */
	if (!session_key || !cli) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	a = cli->auth;
	if (a == NULL) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	switch (a->auth_type) {
	case DCERPC_AUTH_TYPE_SCHANNEL:
		sk = data_blob_const(a->a_u.schannel_auth->creds->session_key,
				     16);
		break;
	case DCERPC_AUTH_TYPE_SPNEGO:
		/* SPNEGO wraps an inner mechanism; dispatch on it. */
		switch (a->spnego_type) {
		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
			sk = auth_ntlmssp_get_session_key(
				a->a_u.auth_ntlmssp_state);
			break;
		case PIPE_AUTH_TYPE_SPNEGO_KRB5:
			sk = data_blob_const(
				a->a_u.kerberos_auth->session_key.data,
				a->a_u.kerberos_auth->session_key.length);
			break;
		default:
			return NT_STATUS_NO_USER_SESSION_KEY;
		}
		break;
	case DCERPC_AUTH_TYPE_NTLMSSP:
		sk = auth_ntlmssp_get_session_key(a->a_u.auth_ntlmssp_state);
		break;
	case DCERPC_AUTH_TYPE_KRB5:
		sk = data_blob_const(a->a_u.kerberos_auth->session_key.data,
				     a->a_u.kerberos_auth->session_key.length);
		break;
	case DCERPC_AUTH_TYPE_NONE:
		/* Anonymous bind: fall back to the key inherited from the
		 * enclosing SMB session. */
		sk = data_blob_const(a->user_session_key.data,
				     a->user_session_key.length);
		break;
	default:
		return NT_STATUS_NO_USER_SESSION_KEY;
	}

	*session_key = data_blob_dup_talloc(mem_ctx, &sk);
	return NT_STATUS_OK;
}