/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"

struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);

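/*
 * Parse and validate an SMB2 LOCK request: pull the lock elements out of
 * the fixed body and the dynamic buffer, look up the file handle and hand
 * the work to smbd_smb2_lock_send().
 */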
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);

	for (l = 1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}

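/*
 * Completion callback: collect the result of the lock request and send
 * either the 4-byte SMB2 LOCK response body or an error response.
 */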
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->xconn,
						 nt_errstr(error));
		return;
	}
}

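/*
 * Translate the SMB2 lock elements into the internal smbd_lock_element
 * representation and apply them via smbd_do_locking()/smbd_do_unlocking().
 * Blocking (non FAIL_IMMEDIATELY) locks may cause the request to go async.
 */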
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10, ("smbd_smb2_lock_send: %s - %s\n",
		   fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	if (!isunlock && (in_lock_count > 1)) {

		/*
		 * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
		 * have more than one lock and one of those is blocking.
		 */

		for (i = 0; i < in_lock_count; i++) {
			uint32_t flags = in_locks[i].flags;

			if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
				tevent_req_nterror(
					req, NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
		}
	}

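	/*
	 * Validate the flags of each element and convert it into an
	 * smbd_lock_element for the byte-range-lock backend.
	 */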
	for (i = 0; i < in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to process
				 * all unlock elements before
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10, ("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			   "smblctx = %llu type %d\n",
			   i,
			   (unsigned long long)locks[i].offset,
			   (unsigned long long)locks[i].count,
			   (unsigned long long)locks[i].smblctx,
			   (int)locks[i].brltype));
	}

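	/*
	 * Keep the converted locks on the request state so a request that
	 * has gone async can be re-processed against them later.
	 */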
	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_unlocking(smb1req, fsp,
					   in_lock_count, locks);
		async = false;
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
		SMBPROFILE_IOBYTES_ASYNC_SET_IDLE(smb2req->profile);
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}

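/*
 * Collect the result of an smbd_smb2_lock_send() request.
 */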
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}

/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;

	remove_pending_lock(state, state->blr);

	/*
	 * If the request is canceled because of logoff, tdis or close
	 * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
	 * NT_STATUS_CANCELLED.
	 *
	 * Note that the close case is handled in
	 * cancel_pending_lock_requests_by_fid_smb2(SHUTDOWN_CLOSE)
	 * for now.
	 */

	if (!NT_STATUS_IS_OK(smb2req->session->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	if (!NT_STATUS_IS_OK(smb2req->tcon->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	tevent_req_nterror(req, NT_STATUS_CANCELLED);
	return true;
}

/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn =
		talloc_get_type_abort(private_data,
		struct smbd_server_connection);

	DEBUG(10, ("received_unlock_msg (SMB2)\n"));

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}

/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}

/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbXsrv_connection *xconn = NULL;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	if (sconn != NULL && sconn->client != NULL) {
		xconn = sconn->client->connections;
	}

	for (; xconn != NULL; xconn = xconn->next) {
		struct smbd_smb2_request *smb2req, *nextreq;

		for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
			struct blocking_lock_record *blr =
				get_pending_smb2req_blr(smb2req);

			nextreq = smb2req->next;

			if (blr == NULL) {
				continue;
			}

			if (!timeval_is_zero(&blr->expire_time)) {
				next_timeout = timeval_brl_min(&next_timeout,
							       &blr->expire_time);
				continue;
			}

			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to;

				psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout,
							       &psx_to);
			}
		}
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			   "timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */
	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			   "timeout = %d.%d seconds from now.\n",
			   (int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = tevent_add_timer(
		sconn->ev_ctx,
		NULL,
		next_timeout,
		brl_timeout_fn,
		sconn);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}

/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2(struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			messaging_server_id(sconn->msg_ctx),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("push_blocking_lock_request_smb2: "
			  "failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10, ("push_blocking_lock_request_smb2: file %s timeout %d\n",
		   fsp_str_dbg(fsp),
		   lock_timeout));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, sconn,
				   MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}

/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	struct byte_range_lock *br_lck = brl_get_locks(
				state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(blr->fsp->conn->sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav);
		TALLOC_FREE(br_lck);
	}
}

/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct byte_range_lock *br_lck = NULL;
	struct smbd_lock_element *e = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	SMBPROFILE_IOBYTES_ASYNC_SET_BUSY(smb2req->profile);

	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* We can only have one blocked lock in SMB2. */
	SMB_ASSERT(state->lock_count == 1);
	SMB_ASSERT(blr->lock_num == 0);

	/* Try and get the outstanding lock. */
	e = &state->locks[blr->lock_num];

	br_lck = do_lock(fsp->conn->sconn->msg_ctx,
			fsp,
			e->smblctx,
			e->count,
			e->offset,
			e->brltype,
			WINDOWS_LOCK,
			true,
			&status,
			&blr->blocking_smblctx);

	TALLOC_FREE(br_lck);

	if (NT_STATUS_IS_OK(status)) {
		/*
		 * Success - we got the lock.
		 */
		DEBUG(3, ("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			  "%s, num_locks=%d\n",
			  fsp_str_dbg(fsp),
			  fsp_fnum_dbg(fsp),
			  (int)state->lock_count));

		remove_pending_lock(state, blr);
		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the lock for this record.
	 * If the time has expired, return a lock error.
	 */
	if (!timeval_is_zero(&blr->expire_time) &&
	    timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get the lock - keep waiting.
	 */
	DEBUG(10, ("reprocess_blocked_smb2_lock: failed to get lock "
		   "for file %s, %s. Still waiting....\n",
		   fsp_str_dbg(fsp),
		   fsp_fnum_dbg(fsp)));

	SMBPROFILE_IOBYTES_ASYNC_SET_IDLE(smb2req->profile);
	return;
}

/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbXsrv_connection *xconn = NULL;

	if (sconn != NULL && sconn->client != NULL) {
		xconn = sconn->client->connections;
	}

	for (; xconn != NULL; xconn = xconn->next) {
		struct smbd_smb2_request *smb2req, *nextreq;

		for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
			const uint8_t *inhdr;

			nextreq = smb2req->next;

			if (smb2req->subreq == NULL) {
				/* This message has been processed. */
				continue;
			}
			if (!tevent_req_is_in_progress(smb2req->subreq)) {
				/* This message has been processed. */
				continue;
			}

			inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
			if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
				reprocess_blocked_smb2_lock(smb2req, tv_curr);
			}
		}
	}

	recalc_smb2_brl_timeout(sconn);
}

/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbXsrv_connection *xconn = NULL;

	if (sconn != NULL && sconn->client != NULL) {
		xconn = sconn->client->connections;
	}

	for (; xconn != NULL; xconn = xconn->next) {
		struct smbd_smb2_request *smb2req, *nextreq;

		for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
			struct smbd_smb2_lock_state *state = NULL;
			files_struct *fsp_curr = NULL;
			struct blocking_lock_record *blr = NULL;
			const uint8_t *inhdr;

			nextreq = smb2req->next;

			if (smb2req->subreq == NULL) {
				/* This message has been processed. */
				continue;
			}
			if (!tevent_req_is_in_progress(smb2req->subreq)) {
				/* This message has been processed. */
				continue;
			}

			inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
			if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
				/* Not a lock call. */
				continue;
			}

			state = tevent_req_data(smb2req->subreq,
					struct smbd_smb2_lock_state);
			if (!state) {
				/* Strange - is this even possible? */
				continue;
			}

			fsp_curr = smb2req->compat_chain_fsp;
			if (fsp_curr == NULL) {
				/* Strange - is this even possible? */
				continue;
			}

			if (fsp_curr != fsp) {
				/* It's not our fid */
				continue;
			}

			blr = state->blr;

			/* Remove the entries from the lock db. */
			brl_lock_cancel(br_lck,
					blr->smblctx,
					messaging_server_id(sconn->msg_ctx),
					blr->offset,
					blr->count,
					blr->lock_flav);

			/* Finally end the request. */
			if (close_type == SHUTDOWN_CLOSE) {
				tevent_req_done(smb2req->subreq);
			} else {
				tevent_req_nterror(smb2req->subreq,
					NT_STATUS_RANGE_NOT_LOCKED);
			}
		}
	}
}