2009-07-02 21:26:05 +04:00
/*
Unix SMB / CIFS implementation .
Core SMB2 server
Copyright ( C ) Stefan Metzmacher 2009
2010-05-07 02:39:21 +04:00
Copyright ( C ) Jeremy Allison 2010
2009-07-02 21:26:05 +04:00
This program is free software ; you can redistribute it and / or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation ; either version 3 of the License , or
( at your option ) any later version .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with this program . If not , see < http : //www.gnu.org/licenses/>.
*/
# include "includes.h"
2011-03-22 18:57:01 +03:00
# include "smbd/smbd.h"
2009-07-02 21:26:05 +04:00
# include "smbd/globals.h"
2009-08-12 19:52:55 +04:00
# include "../libcli/smb/smb_common.h"
2011-04-28 19:38:09 +04:00
# include "../lib/util/tevent_ntstatus.h"
2019-06-16 01:23:50 +03:00
# include "lib/dbwrap/dbwrap_watch.h"
# include "librpc/gen_ndr/open_files.h"
2011-03-24 17:31:06 +03:00
# include "messages.h"
2009-07-02 21:26:05 +04:00
2018-03-21 22:01:05 +03:00
# undef DBGC_CLASS
# define DBGC_CLASS DBGC_SMB2
2009-07-02 21:26:05 +04:00
/*
 * One lock range exactly as decoded from the SMB2 LOCK request body.
 */
struct smbd_smb2_lock_element {
	uint64_t offset;	/* byte offset of the locked range */
	uint64_t length;	/* length of the range in bytes */
	uint32_t flags;		/* SMB2_LOCK_FLAG_* bits from the wire */
};
2010-05-07 02:39:21 +04:00
/*
 * Async state carried across retries of one SMB2 LOCK request.
 */
struct smbd_smb2_lock_state {
	struct tevent_context *ev;
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;	/* fake SMB1 request for the lock backend */
	struct files_struct *fsp;
	bool blocking;			/* true: wait for the lock instead of
					 * failing immediately */
	uint32_t polling_msecs;		/* growing poll interval used after
					 * LOCK_NOT_GRANTED with no waker */
	uint32_t retry_msecs;		/* growing retry interval used while the
					 * backend answers NT_STATUS_RETRY */
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
2009-07-02 21:26:05 +04:00
/* Forward declarations for the async lock implementation below. */
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
NTSTATUS smbd_smb2_request_process_lock ( struct smbd_smb2_request * req )
{
const uint8_t * inbody ;
uint16_t in_lock_count ;
uint64_t in_file_id_persistent ;
uint64_t in_file_id_volatile ;
2012-06-08 13:57:21 +04:00
struct files_struct * in_fsp ;
2009-07-02 21:26:05 +04:00
struct smbd_smb2_lock_element * in_locks ;
struct tevent_req * subreq ;
const uint8_t * lock_buffer ;
2009-07-09 13:33:58 +04:00
uint16_t l ;
2011-09-06 16:01:43 +04:00
NTSTATUS status ;
2009-07-02 21:26:05 +04:00
2011-09-06 16:01:43 +04:00
status = smbd_smb2_request_verify_sizes ( req , 0x30 ) ;
if ( ! NT_STATUS_IS_OK ( status ) ) {
return smbd_smb2_request_error ( req , status ) ;
2009-07-02 21:26:05 +04:00
}
2012-08-05 17:00:23 +04:00
inbody = SMBD_SMB2_IN_BODY_PTR ( req ) ;
2009-07-02 21:26:05 +04:00
in_lock_count = CVAL ( inbody , 0x02 ) ;
2009-07-09 13:33:58 +04:00
/* 0x04 - 4 bytes reserved */
2009-07-02 21:26:05 +04:00
in_file_id_persistent = BVAL ( inbody , 0x08 ) ;
in_file_id_volatile = BVAL ( inbody , 0x10 ) ;
if ( in_lock_count < 1 ) {
return smbd_smb2_request_error ( req , NT_STATUS_INVALID_PARAMETER ) ;
}
2012-08-05 17:00:23 +04:00
if ( ( ( in_lock_count - 1 ) * 0x18 ) > SMBD_SMB2_IN_DYN_LEN ( req ) ) {
2009-07-02 21:26:05 +04:00
return smbd_smb2_request_error ( req , NT_STATUS_INVALID_PARAMETER ) ;
}
in_locks = talloc_array ( req , struct smbd_smb2_lock_element ,
in_lock_count ) ;
if ( in_locks = = NULL ) {
return smbd_smb2_request_error ( req , NT_STATUS_NO_MEMORY ) ;
}
2009-07-09 13:33:58 +04:00
l = 0 ;
2009-07-02 21:26:05 +04:00
lock_buffer = inbody + 0x18 ;
2009-07-09 13:33:58 +04:00
in_locks [ l ] . offset = BVAL ( lock_buffer , 0x00 ) ;
in_locks [ l ] . length = BVAL ( lock_buffer , 0x08 ) ;
in_locks [ l ] . flags = IVAL ( lock_buffer , 0x10 ) ;
/* 0x14 - 4 reserved bytes */
2009-07-02 21:26:05 +04:00
2017-12-20 16:05:54 +03:00
status = req - > session - > status ;
if ( NT_STATUS_EQUAL ( status , NT_STATUS_NETWORK_SESSION_EXPIRED ) ) {
/*
* We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
* for lock requests only .
*
* Unlock requests still need to be processed !
*
* This means smbd_smb2_request_check_session ( )
* can ' t handle the difference and always
* allows SMB2_OP_LOCK .
*/
if ( in_locks [ 0 ] . flags ! = SMB2_LOCK_FLAG_UNLOCK ) {
return smbd_smb2_request_error ( req , status ) ;
}
}
2012-08-05 17:00:23 +04:00
lock_buffer = SMBD_SMB2_IN_DYN_PTR ( req ) ;
2009-07-02 21:26:05 +04:00
2009-07-09 13:33:58 +04:00
for ( l = 1 ; l < in_lock_count ; l + + ) {
in_locks [ l ] . offset = BVAL ( lock_buffer , 0x00 ) ;
in_locks [ l ] . length = BVAL ( lock_buffer , 0x08 ) ;
in_locks [ l ] . flags = IVAL ( lock_buffer , 0x10 ) ;
/* 0x14 - 4 reserved bytes */
2009-07-02 21:26:05 +04:00
lock_buffer + = 0x18 ;
}
2012-06-08 13:57:21 +04:00
in_fsp = file_fsp_smb2 ( req , in_file_id_persistent , in_file_id_volatile ) ;
if ( in_fsp = = NULL ) {
return smbd_smb2_request_error ( req , NT_STATUS_FILE_CLOSED ) ;
}
2018-12-27 17:18:55 +03:00
subreq = smbd_smb2_lock_send ( req , req - > sconn - > ev_ctx ,
2012-06-08 13:57:21 +04:00
req , in_fsp ,
2009-07-02 21:26:05 +04:00
in_lock_count ,
in_locks ) ;
if ( subreq = = NULL ) {
return smbd_smb2_request_error ( req , NT_STATUS_NO_MEMORY ) ;
}
tevent_req_set_callback ( subreq , smbd_smb2_request_lock_done , req ) ;
2011-11-14 18:42:55 +04:00
return smbd_smb2_request_pending_queue ( req , subreq , 500 ) ;
2009-07-02 21:26:05 +04:00
}
static void smbd_smb2_request_lock_done ( struct tevent_req * subreq )
{
2010-05-07 02:39:21 +04:00
struct smbd_smb2_request * smb2req = tevent_req_callback_data ( subreq ,
2009-07-02 21:26:05 +04:00
struct smbd_smb2_request ) ;
DATA_BLOB outbody ;
NTSTATUS status ;
NTSTATUS error ; /* transport error */
status = smbd_smb2_lock_recv ( subreq ) ;
TALLOC_FREE ( subreq ) ;
if ( ! NT_STATUS_IS_OK ( status ) ) {
2010-05-07 02:39:21 +04:00
error = smbd_smb2_request_error ( smb2req , status ) ;
2009-07-02 21:26:05 +04:00
if ( ! NT_STATUS_IS_OK ( error ) ) {
2014-06-11 14:15:48 +04:00
smbd_server_connection_terminate ( smb2req - > xconn ,
2009-07-02 21:26:05 +04:00
nt_errstr ( error ) ) ;
return ;
}
return ;
}
2013-12-04 17:59:07 +04:00
outbody = smbd_smb2_generate_outbody ( smb2req , 0x04 ) ;
2009-07-02 21:26:05 +04:00
if ( outbody . data = = NULL ) {
2010-05-07 02:39:21 +04:00
error = smbd_smb2_request_error ( smb2req , NT_STATUS_NO_MEMORY ) ;
2009-07-02 21:26:05 +04:00
if ( ! NT_STATUS_IS_OK ( error ) ) {
2014-06-11 14:15:48 +04:00
smbd_server_connection_terminate ( smb2req - > xconn ,
2009-07-02 21:26:05 +04:00
nt_errstr ( error ) ) ;
return ;
}
return ;
}
SSVAL ( outbody . data , 0x00 , 0x04 ) ; /* struct size */
SSVAL ( outbody . data , 0x02 , 0 ) ; /* reserved */
2010-05-07 02:39:21 +04:00
error = smbd_smb2_request_done ( smb2req , outbody , NULL ) ;
2009-07-02 21:26:05 +04:00
if ( ! NT_STATUS_IS_OK ( error ) ) {
2014-06-11 14:15:48 +04:00
smbd_server_connection_terminate ( smb2req - > xconn ,
2009-07-02 21:26:05 +04:00
nt_errstr ( error ) ) ;
return ;
}
}
2019-08-13 17:39:41 +03:00
static void smbd_smb2_lock_try(struct tevent_req *req);
static void smbd_smb2_lock_retry(struct tevent_req *subreq);
static bool smbd_smb2_lock_cancel(struct tevent_req *req);
2009-07-02 21:26:05 +04:00
/*
 * Start the async lock engine for one SMB2 LOCK request.
 *
 * Validates the combination of lock flags (MS-SMB2 3.3.5.14),
 * translates the wire elements into smbd_lock_element structures,
 * performs unlocks synchronously and lock attempts via
 * smbd_smb2_lock_try(), going async when the lock is contended.
 */
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->fsp = fsp;
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	state->smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(state->smb1req, req)) {
		return tevent_req_post(req, ev);
	}

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	/* The first element decides whether this is lock or unlock. */
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		state->blocking = true;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	if (!isunlock && (in_lock_count > 1)) {
		/*
		 * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
		 * have more than one lock and one of those is blocking.
		 */
		for (i = 0; i < in_lock_count; i++) {
			uint32_t flags = in_locks[i].flags;

			if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
				tevent_req_nterror(
					req, NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
		}
	}

	for (i = 0; i < in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was a UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to process
				 * all unlock elements before
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
			  "smblctx = %"PRIu64" type %d\n",
			  i,
			  locks[i].offset,
			  locks[i].count,
			  locks[i].smblctx,
			  (int)locks[i].brltype);
	}

	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		/* Unlocks are always processed synchronously. */
		status = smbd_do_unlocking(
			state->smb1req, fsp, in_lock_count, locks, WINDOWS_LOCK);
		if (tevent_req_nterror(req, status)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	smbd_smb2_lock_try(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	/* Contended: stay async until woken or cancelled. */
	tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
	aio_add_req_to_fsp(state->fsp, req);
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return req;
}
2019-08-19 17:25:59 +03:00
static void smbd_smb2_lock_update_retry_msecs (
struct smbd_smb2_lock_state * state )
{
/*
* The default lp_lock_spin_time ( ) is 200 ms ,
* we just use half of it to trigger the first retry .
*
* v_min is in the range of 0.001 to 10 secs
* ( 0.1 secs by default )
*
* v_max is in the range of 0.01 to 100 secs
* ( 1.0 secs by default )
*
* The typical steps are :
* 0.1 , 0.2 , 0.3 , 0.4 , . . . 1.0
*/
uint32_t v_min = MAX ( 2 , MIN ( 20000 , lp_lock_spin_time ( ) ) ) / 2 ;
uint32_t v_max = 10 * v_min ;
if ( state - > retry_msecs > = v_max ) {
state - > retry_msecs = v_max ;
return ;
}
state - > retry_msecs + = v_min ;
}
2019-08-02 15:50:27 +03:00
static void smbd_smb2_lock_update_polling_msecs (
struct smbd_smb2_lock_state * state )
{
/*
* The default lp_lock_spin_time ( ) is 200 ms .
*
* v_min is in the range of 0.002 to 20 secs
* ( 0.2 secs by default )
*
* v_max is in the range of 0.02 to 200 secs
* ( 2.0 secs by default )
*
* The typical steps are :
* 0.2 , 0.4 , 0.6 , 0.8 , . . . 2.0
*/
uint32_t v_min = MAX ( 2 , MIN ( 20000 , lp_lock_spin_time ( ) ) ) ;
uint32_t v_max = 10 * v_min ;
if ( state - > polling_msecs > = v_max ) {
state - > polling_msecs = v_max ;
return ;
}
state - > polling_msecs + = v_min ;
}
2019-08-13 17:39:41 +03:00
static void smbd_smb2_lock_try ( struct tevent_req * req )
2019-06-16 01:23:50 +03:00
{
struct smbd_smb2_lock_state * state = tevent_req_data (
req , struct smbd_smb2_lock_state ) ;
struct share_mode_lock * lck = NULL ;
uint16_t blocker_idx ;
struct server_id blocking_pid = { 0 } ;
uint64_t blocking_smblctx ;
NTSTATUS status ;
2019-08-13 17:39:41 +03:00
struct tevent_req * subreq = NULL ;
2019-08-02 15:50:27 +03:00
struct timeval endtime = { 0 } ;
2019-06-16 01:23:50 +03:00
lck = get_existing_share_mode_lock (
talloc_tos ( ) , state - > fsp - > file_id ) ;
if ( tevent_req_nomem ( lck , req ) ) {
return ;
}
status = smbd_do_locks_try (
state - > fsp ,
WINDOWS_LOCK ,
state - > lock_count ,
state - > locks ,
& blocker_idx ,
& blocking_pid ,
& blocking_smblctx ) ;
if ( NT_STATUS_IS_OK ( status ) ) {
TALLOC_FREE ( lck ) ;
tevent_req_done ( req ) ;
return ;
}
2019-08-19 17:25:59 +03:00
if ( NT_STATUS_EQUAL ( status , NT_STATUS_RETRY ) ) {
/*
* We got NT_STATUS_RETRY ,
* we reset polling_msecs so that
* that the retries based on LOCK_NOT_GRANTED
* will later start with small intervalls again .
*/
state - > polling_msecs = 0 ;
/*
* The backend wasn ' t able to decide yet .
* We need to wait even for non - blocking
* locks .
*
* The backend uses blocking_smblctx = = UINT64_MAX
* to indicate that we should use retry timers .
*
* It uses blocking_smblctx = = 0 to indicate
* it will use share_mode_wakeup_waiters ( )
* to wake us . Note that unrelated changes in
* locking . tdb may cause retries .
*/
if ( blocking_smblctx ! = UINT64_MAX ) {
SMB_ASSERT ( blocking_smblctx = = 0 ) ;
goto setup_retry ;
}
smbd_smb2_lock_update_retry_msecs ( state ) ;
DBG_DEBUG ( " Waiting for a backend decision. "
" Retry in % " PRIu32 " msecs \n " ,
state - > retry_msecs ) ;
/*
* We completely ignore state - > endtime here
* we we ' ll wait for a backend decision forever .
* If the backend is smart enough to implement
* some NT_STATUS_RETRY logic , it has to
* switch to any other status after in order
* to avoid waiting forever .
*/
endtime = timeval_current_ofs_msec ( state - > retry_msecs ) ;
goto setup_retry ;
}
2019-08-13 17:39:41 +03:00
if ( NT_STATUS_EQUAL ( status , NT_STATUS_FILE_LOCK_CONFLICT ) ) {
/*
* This is a bug and will be changed into an assert
* in future version . We should only
* ever get NT_STATUS_LOCK_NOT_GRANTED here !
*/
static uint64_t _bug_count ;
int _level = ( _bug_count + + = = 0 ) ? DBGLVL_ERR : DBGLVL_DEBUG ;
DBG_PREFIX ( _level , ( " BUG: Got %s mapping to "
" NT_STATUS_LOCK_NOT_GRANTED \n " ,
nt_errstr ( status ) ) ) ;
status = NT_STATUS_LOCK_NOT_GRANTED ;
}
if ( ! NT_STATUS_EQUAL ( status , NT_STATUS_LOCK_NOT_GRANTED ) ) {
TALLOC_FREE ( lck ) ;
tevent_req_nterror ( req , status ) ;
return ;
}
2019-08-19 17:25:59 +03:00
/*
* We got LOCK_NOT_GRANTED , make sure
* a following STATUS_RETRY will start
* with short intervalls again .
*/
state - > retry_msecs = 0 ;
2019-08-13 17:39:41 +03:00
if ( ! state - > blocking ) {
TALLOC_FREE ( lck ) ;
tevent_req_nterror ( req , status ) ;
return ;
}
2019-08-02 15:50:27 +03:00
if ( blocking_smblctx = = UINT64_MAX ) {
smbd_smb2_lock_update_polling_msecs ( state ) ;
DBG_DEBUG ( " Blocked on a posix lock. Retry in % " PRIu32 " msecs \n " ,
state - > polling_msecs ) ;
endtime = timeval_current_ofs_msec ( state - > polling_msecs ) ;
}
2019-08-19 17:25:59 +03:00
setup_retry :
2019-08-13 17:39:41 +03:00
DBG_DEBUG ( " Watching share mode lock \n " ) ;
2019-06-16 01:23:50 +03:00
2019-11-04 15:06:20 +03:00
subreq = share_mode_watch_send (
state , state - > ev , lck - > data - > id , blocking_pid ) ;
2019-06-16 01:23:50 +03:00
TALLOC_FREE ( lck ) ;
if ( tevent_req_nomem ( subreq , req ) ) {
return ;
}
tevent_req_set_callback ( subreq , smbd_smb2_lock_retry , req ) ;
2019-08-02 15:50:27 +03:00
if ( ! timeval_is_zero ( & endtime ) ) {
bool ok ;
ok = tevent_req_set_endtime ( subreq ,
state - > ev ,
endtime ) ;
if ( ! ok ) {
tevent_req_oom ( req ) ;
return ;
}
}
2019-06-16 01:23:50 +03:00
}
2019-08-13 17:39:41 +03:00
static void smbd_smb2_lock_retry ( struct tevent_req * subreq )
{
struct tevent_req * req = tevent_req_callback_data (
subreq , struct tevent_req ) ;
struct smbd_smb2_lock_state * state = tevent_req_data (
req , struct smbd_smb2_lock_state ) ;
NTSTATUS status ;
bool ok ;
/*
* Make sure we run as the user again
*/
2019-07-13 17:20:11 +03:00
ok = change_to_user_and_service_by_fsp ( state - > fsp ) ;
2019-08-13 17:39:41 +03:00
if ( ! ok ) {
tevent_req_nterror ( req , NT_STATUS_ACCESS_DENIED ) ;
return ;
}
2019-11-04 15:06:20 +03:00
status = share_mode_watch_recv ( subreq , NULL , NULL ) ;
2019-08-13 17:39:41 +03:00
TALLOC_FREE ( subreq ) ;
2019-08-02 15:50:27 +03:00
if ( NT_STATUS_EQUAL ( status , NT_STATUS_IO_TIMEOUT ) ) {
/*
* This is just a trigger for a timed retry .
*/
status = NT_STATUS_OK ;
}
2019-08-13 17:39:41 +03:00
if ( tevent_req_nterror ( req , status ) ) {
return ;
}
smbd_smb2_lock_try ( req ) ;
}
2009-07-02 21:26:05 +04:00
/*
 * Collect the result of smbd_smb2_lock_send().
 */
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
2010-04-09 09:15:55 +04:00
2010-05-01 08:03:20 +04:00
/****************************************************************
Cancel an outstanding blocking lock request .
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static bool smbd_smb2_lock_cancel ( struct tevent_req * req )
{
2014-03-10 12:43:35 +04:00
struct smbd_smb2_request * smb2req = NULL ;
struct smbd_smb2_lock_state * state = tevent_req_data ( req ,
struct smbd_smb2_lock_state ) ;
if ( ! state ) {
return false ;
}
2010-05-01 08:03:20 +04:00
2014-03-10 12:43:35 +04:00
if ( ! state - > smb2req ) {
return false ;
}
2010-05-01 08:03:20 +04:00
2014-03-10 12:43:35 +04:00
smb2req = state - > smb2req ;
2010-05-01 08:03:20 +04:00
2014-03-10 12:47:11 +04:00
/*
2019-06-16 01:23:50 +03:00
* If the request is canceled because of close , logoff or tdis
2014-03-10 12:47:11 +04:00
* the status is NT_STATUS_RANGE_NOT_LOCKED instead of
* NT_STATUS_CANCELLED .
*/
2020-04-03 09:06:27 +03:00
if ( state - > fsp - > fsp_flags . closing | |
2019-06-16 01:23:50 +03:00
! NT_STATUS_IS_OK ( smb2req - > session - > status ) | |
! NT_STATUS_IS_OK ( smb2req - > tcon - > status ) ) {
2014-03-10 12:47:11 +04:00
tevent_req_nterror ( req , NT_STATUS_RANGE_NOT_LOCKED ) ;
return true ;
}
2012-09-20 18:16:03 +04:00
tevent_req_nterror ( req , NT_STATUS_CANCELLED ) ;
2014-03-10 12:43:35 +04:00
return true ;
2010-05-01 08:03:20 +04:00
}