/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

struct byte_range_lock {
        struct files_struct *fsp;
        TALLOC_CTX *req_mem_ctx;
        const struct GUID *req_guid;
        unsigned int num_locks;
        bool modified;
        struct lock_struct *lock_data;
        struct db_record *record;
};

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
        struct server_id_buf tmp;

        DBG_DEBUG("[%u]: smblctx = %" PRIu64 ", tid = %" PRIu32 ", pid = %s, "
                  "start = %" PRIu64 ", size = %" PRIu64 ", fnum = %" PRIu64 ", "
                  "%s %s\n",
                  i,
                  pls->context.smblctx,
                  pls->context.tid,
                  server_id_str_buf(pls->context.pid, &tmp),
                  pls->start,
                  pls->size,
                  pls->fnum,
                  lock_type_name(pls->lock_type),
                  lock_flav_name(pls->lock_flav));
}

unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
        return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
        return brl->fsp;
}

TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
{
        if (brl->req_mem_ctx == NULL) {
                return talloc_get_type_abort(brl, struct byte_range_lock);
        }

        return brl->req_mem_ctx;
}

const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
{
        if (brl->req_guid == NULL) {
                static const struct GUID brl_zero_req_guid;
                return &brl_zero_req_guid;
        }

        return brl->req_guid;
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
                             const struct lock_context *ctx2)
{
        return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smblctx == ctx2->smblctx) &&
                (ctx1->tid == ctx2->tid));
}

bool byte_range_valid(uint64_t ofs, uint64_t len)
{
        uint64_t max_len = UINT64_MAX - ofs;
        uint64_t effective_len;

        /*
         * [MS-FSA] specifies this:
         *
         * If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
         *   return STATUS_INVALID_LOCK_RANGE
         * }
         *
         * We avoid integer wrapping and calculate
         * max and effective len instead.
         */

        if (len == 0) {
                return true;
        }

        effective_len = len - 1;
        if (effective_len <= max_len) {
                return true;
        }

        return false;
}

bool byte_range_overlap(uint64_t ofs1,
                        uint64_t len1,
                        uint64_t ofs2,
                        uint64_t len2)
{
        uint64_t last1;
        uint64_t last2;
        bool valid;

        /*
         * This is based on [MS-FSA] 2.1.4.10
         * Algorithm for Determining If a Range Access
         * Conflicts with Byte-Range Locks
         */

        /*
         * The {0, 0} range doesn't conflict with any byte-range lock
         */
        if (ofs1 == 0 && len1 == 0) {
                return false;
        }
        if (ofs2 == 0 && len2 == 0) {
                return false;
        }

        /*
         * The caller should have checked that the ranges are
         * valid. But currently we gracefully handle
         * the overflow of a read/write check.
         */
        valid = byte_range_valid(ofs1, len1);
        if (valid) {
                last1 = ofs1 + len1 - 1;
        } else {
                last1 = UINT64_MAX;
        }
        valid = byte_range_valid(ofs2, len2);
        if (valid) {
                last2 = ofs2 + len2 - 1;
        } else {
                last2 = UINT64_MAX;
        }

        /*
         * If one range starts after the last
         * byte of the other range there's
         * no conflict.
         */
        if (ofs1 > last2) {
                return false;
        }
        if (ofs2 > last1) {
                return false;
        }
        return true;
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        return byte_range_overlap(lck1->start,
                                  lck1->size,
                                  lck2->start,
                                  lck2->size);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* A READ lock can stack on top of a WRITE lock if they have the same
         * context & fnum. */
        if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
            brl_same_context(&lck1->context, &lck2->context) &&
            lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}
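
/*
 * Example of brl_conflict(): an existing WRITE lock at start=0,size=10 and
 * an incoming READ lock over the same range do not conflict when they come
 * from the same context and fnum (the READ stacks on the WRITE). Any other
 * overlapping combination that involves a WRITE lock conflicts.
 */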

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
                               const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context don't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is read, the other write, or the context is different,
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
                          const struct lock_struct *lck2)
{
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lock,
                               const struct lock_struct *rw_probe)
{
        if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
                return False;
        }

        if (lock->lock_flav == POSIX_LOCK &&
            rw_probe->lock_flav == POSIX_LOCK) {
                /*
                 * POSIX flavour locks never conflict here - this is only called
                 * in the read/write path.
                 */
                return False;
        }

        if (!brl_overlap(lock, rw_probe)) {
                /*
                 * I/O can only conflict when overlapping a lock, thus let it
                 * pass
                 */
                return false;
        }

        if (!brl_same_context(&lock->context, &rw_probe->context)) {
                /*
                 * Different process, conflict
                 */
                return true;
        }

        if (lock->fnum != rw_probe->fnum) {
                /*
                 * Different file handle, conflict
                 */
                return true;
        }

        if ((lock->lock_type == READ_LOCK) &&
            (rw_probe->lock_type == WRITE_LOCK)) {
                /*
                 * Incoming WRITE locks conflict with existing READ locks even
                 * if the context is the same. JRA. See LOCKTEST7 in
                 * smbtorture.
                 */
                return true;
        }

        /*
         * I/O request compatible with existing lock, let it pass without
         * conflict
         */

        return false;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
        int tdb_flags;
        char *db_path;

        if (brlock_db) {
                return;
        }

        tdb_flags = SMBD_VOLATILE_TDB_FLAGS | TDB_SEQNUM;

        db_path = lock_path(talloc_tos(), "brlock.tdb");
        if (db_path == NULL) {
                DEBUG(0, ("out of memory!\n"));
                return;
        }

        brlock_db = db_open(NULL, db_path,
                            SMBD_VOLATILE_TDB_HASH_SIZE, tdb_flags,
                            read_only ? O_RDONLY : (O_RDWR|O_CREAT), 0644,
                            DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
        if (!brlock_db) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                         db_path));
                TALLOC_FREE(db_path);
                return;
        }
        TALLOC_FREE(db_path);
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
        TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return NUMERIC_CMP(lck1->start, lck2->start);
        }
        return NUMERIC_CMP(lck1->size, lck2->size);
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
                                  struct lock_struct *plock)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = br_lck->lock_data;
        NTSTATUS status;
        bool valid;

        SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

        valid = byte_range_valid(plock->start, plock->size);
        if (!valid) {
                return NT_STATUS_INVALID_LOCK_RANGE;
        }

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        if (!serverid_exists(&locks[i].context.pid)) {
                                locks[i].context.pid.pid = 0;
                                br_lck->modified = true;
                                continue;
                        }
                        /* Remember who blocked us. */
                        plock->context.smblctx = locks[i].context.smblctx;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 &&
                    locks[i].size == 0) {
                        break;
                }
#endif
        }

        contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (lp_posix_locking(fsp->conn->params)) {
                int errno_ret;
                if (!set_posix_lock_windows_flavour(fsp,
                                                    plock->start,
                                                    plock->size,
                                                    plock->lock_type,
                                                    &plock->context,
                                                    locks,
                                                    br_lck->num_locks,
                                                    &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                status = NT_STATUS_LOCK_NOT_GRANTED;
                                goto fail;
                        } else {
                                status = map_nt_error_from_unix(errno);
                                goto fail;
                        }
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = talloc_realloc(br_lck, locks, struct lock_struct,
                               (br_lck->num_locks + 1));
        if (!locks) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
 fail:
        contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        return status;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,  /* Output array. */
                                             struct lock_struct *ex,       /* existing lock. */
                                             struct lock_struct *plock)    /* proposed lock. */
{
        bool lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks on different context - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */

        /* Did we overlap ? */

/*********************************************
                                        +---------+
                                        |   ex    |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |   ex    |
        +---------+
**********************************************/

        if ((ex->start > (plock->start + plock->size)) ||
            (plock->start > (ex->start + ex->size))) {

                /* No overlap with this lock - copy existing. */

                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

/*********************************************
        +---------------------------+
        |            ex             |
        +---------------------------+
        +---------------------------+
        |           plock           | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |      ex       |
             +---------------+
        +---------------------------+
        |           plock           | -> replace with plock.
        +---------------------------+

**********************************************/

        if ((ex->start >= plock->start) &&
            (ex->start + ex->size <= plock->start + plock->size)) {

                /* Replace - discard existing lock. */

                return 0;
        }

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |     plock     |
        +---------------+

BECOMES....
        +---------------+-------+
        |     plock     |  ex   | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+
**********************************************/

        if (plock->start + plock->size == ex->start) {

                /* If the lock types are the same, we merge, if different, we
                   add the remainder of the old lock. */

                if (lock_types_differ) {
                        /* Add existing. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->size += ex->size;
                        return 0;
                }
        }

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |     plock     |
                +---------------+
BECOMES....
        +-------+---------------+
        |  ex   |     plock     | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+

**********************************************/

        if (ex->start + ex->size == plock->start) {

                /* If the lock types are the same, we merge, if different, we
                   add the existing lock. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->start = ex->start;
                        plock->size += ex->size;
                        return 0;
                }
        }

/*********************************************
Overlap after.
        +-----------------------+
        |           ex          |
        +-----------------------+
        +---------------+
        |     plock     |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |     plock     |
        +---------------+

BECOMES....
        +---------------+-------+
        |     plock     |  ex   | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+
**********************************************/

        if ((ex->start >= plock->start) &&
            (ex->start <= plock->start + plock->size) &&
            (ex->start + ex->size > plock->start + plock->size)) {

                /* If the lock types are the same, we merge, if different, we
                   add the remainder of the old lock. */

                if (lock_types_differ) {
                        /* Add remaining existing. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[0].start = plock->start + plock->size;
                        lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->size += (ex->start + ex->size) - (plock->start + plock->size);
                        return 0;
                }
        }

/*********************************************
Overlap before.
        +-----------------------+
        |           ex          |
        +-----------------------+
                +---------------+
                |     plock     |
                +---------------+
OR
        +-------------+
        |      ex     |
        +-------------+
                +---------------+
                |     plock     |
                +---------------+

BECOMES....
        +-------+---------------+
        |  ex   |     plock     | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |         plock         | - same lock type.
        +-----------------------+

**********************************************/

        if ((ex->start < plock->start) &&
            (ex->start + ex->size >= plock->start) &&
            (ex->start + ex->size <= plock->start + plock->size)) {

                /* If the lock types are the same, we merge, if different, we
                   add the truncated old lock. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. MUST ADJUST plock SIZE FIRST ! */
                        plock->size += (plock->start - ex->start);
                        plock->start = ex->start;
                        return 0;
                }
        }

/*********************************************
Complete overlap.
        +---------------------------+
        |             ex            |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        |  ex   |  plock  |   ex    | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |           plock           | - same lock type.
        +---------------------------+
**********************************************/

        if ((ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size)) {

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Just eat the existing locks, merge them into plock. */
                        plock->start = ex->start;
                        plock->size = ex->size;
                        return 0;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge");
        /* Notreached. */

        /* Keep some compilers happy. */
        return 0;
}
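
/*
 * Worked example of brlock_posix_split_merge(): with an existing WRITE lock
 * ex = {start=0, size=100} and a proposed READ lock plock = {start=40,
 * size=20} from the same context, the "complete overlap" case applies and
 * the function returns 2, leaving lck_arr[0] = {0, 40} and
 * lck_arr[1] = {60, 40} either side of the new lock. Had the lock types
 * been the same, it would instead have merged and returned 0, with plock
 * grown to cover {0, 100}.
 */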

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
                               struct lock_struct *plock)
{
        unsigned int i, count, posix_count;
        struct lock_struct *locks = br_lck->lock_data;
        struct lock_struct *tp;
        bool break_oplocks = false;
        NTSTATUS status;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size - 1 < plock->start) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        count = posix_count = 0;

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *curr_lock = &locks[i];

                if (curr_lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(curr_lock, plock)) {
                                if (!serverid_exists(&curr_lock->context.pid)) {
                                        curr_lock->context.pid.pid = 0;
                                        br_lck->modified = true;
                                        continue;
                                }
                                /* No games with error messages. */
                                TALLOC_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smblctx = curr_lock->context.smblctx;
                                return NT_STATUS_LOCK_NOT_GRANTED;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
                        count++;
                } else {
                        unsigned int tmp_count = 0;

                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(curr_lock, plock)) {
                                if (!serverid_exists(&curr_lock->context.pid)) {
                                        curr_lock->context.pid.pid = 0;
                                        br_lck->modified = true;
                                        continue;
                                }
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                TALLOC_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smblctx = curr_lock->context.smblctx;
                                return NT_STATUS_LOCK_NOT_GRANTED;
                        }

                        /* Work out overlaps. */
                        tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
                        posix_count += tmp_count;
                        count += tmp_count;
                }
        }

        /*
         * Break oplocks while we hold a brl. Since lock() and unlock() calls
         * are not symmetric with POSIX semantics, we cannot guarantee our
         * contend_level2_oplocks_begin/end calls will be acquired and
         * released one-for-one as with Windows semantics. Therefore we only
         * call contend_level2_oplocks_begin if this is the first POSIX brl on
         * the file.
         */
        break_oplocks = (posix_count == 0);
        if (break_oplocks) {
                contend_level2_oplocks_begin(br_lck->fsp,
                                             LEVEL2_CONTEND_POSIX_BRL);
        }

        /* Try and add the lock in order, sorted by lock start. */
        for (i=0; i < count; i++) {
                struct lock_struct *curr_lock = &tp[i];

                if (curr_lock->start <= plock->start) {
                        continue;
                }
        }

        if (i < count) {
                memmove(&tp[i+1], &tp[i],
                        (count - i)*sizeof(struct lock_struct));
        }
        memcpy(&tp[i], plock, sizeof(struct lock_struct));
        count++;

        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (lp_posix_locking(br_lck->fsp->conn->params)) {
                int errno_ret;

                /* The lower layer just needs to attempt to
                   get the system POSIX lock. We've weeded out
                   any conflicts above. */

                if (!set_posix_lock_posix_flavour(br_lck->fsp,
                                                  plock->start,
                                                  plock->size,
                                                  plock->lock_type,
                                                  &plock->context,
                                                  &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                TALLOC_FREE(tp);
                                status = NT_STATUS_LOCK_NOT_GRANTED;
                                goto fail;
                        } else {
                                TALLOC_FREE(tp);
                                status = map_nt_error_from_unix(errno);
                                goto fail;
                        }
                }
        }

        /* If we didn't use all the allocated size,
         * Realloc so we don't leak entries per lock call. */
        if (count < br_lck->num_locks + 2) {
                tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
                if (!tp) {
                        status = NT_STATUS_NO_MEMORY;
                        goto fail;
                }
        }

        br_lck->num_locks = count;
        TALLOC_FREE(br_lck->lock_data);
        br_lck->lock_data = tp;
        locks = tp;
        br_lck->modified = True;

        /* A successful downgrade from write to read lock can trigger a lock
           re-evaluation where waiting readers can now proceed. */

        return NT_STATUS_OK;
 fail:
        if (break_oplocks) {
                contend_level2_oplocks_end(br_lck->fsp,
                                           LEVEL2_CONTEND_POSIX_BRL);
        }
        return status;
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(
        struct byte_range_lock *br_lck,
        uint64_t smblctx,
        struct server_id pid,
        br_off start,
        br_off size,
        enum brl_type lock_type,
        enum brl_flavour lock_flav,
        struct server_id *blocker_pid,
        uint64_t *psmblctx)
{
        NTSTATUS ret;
        struct lock_struct lock;

        ZERO_STRUCT(lock);

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

        lock = (struct lock_struct) {
                .context.smblctx = smblctx,
                .context.pid = pid,
                .context.tid = br_lck->fsp->conn->cnum,
                .start = start,
                .size = size,
                .fnum = br_lck->fsp->fnum,
                .lock_type = lock_type,
                .lock_flav = lock_flav
        };

        if (lock_flav == WINDOWS_LOCK) {
                ret = SMB_VFS_BRL_LOCK_WINDOWS(
                        br_lck->fsp->conn, br_lck, &lock);
        } else {
                ret = brl_lock_posix(br_lck, &lock);
        }

#if ZERO_ZERO
        /* sort the lock list */
        TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif
        /* If we're returning an error, return who blocked us. */
        if (!NT_STATUS_IS_OK(ret) && psmblctx) {
                *blocker_pid = lock.context.pid;
                *psmblctx = lock.context.smblctx;
        }
        return ret;
}
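
/*
 * Typical usage sketch for brl_lock(): a caller obtains the lock set with
 * brl_get_locks(talloc_tos(), fsp), calls brl_lock() with the desired
 * range, type and flavour, and then TALLOC_FREEs the byte_range_lock so
 * that any modifications are written back to brlock.tdb.
 */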

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
                                const struct lock_struct *plock)
{
        unsigned int i;
        struct lock_struct *locks = br_lck->lock_data;
        enum brl_type deleted_lock_type = READ_LOCK;    /* shut the compiler up.... */

        SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
        /* Delete write locks by preference... The lock list
           is sorted in the zero zero case. */

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i != br_lck->num_locks) {
                /* We found it - don't search again. */
                goto unlock_continue;
        }
#endif

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

#if ZERO_ZERO
  unlock_continue:
#endif

        ARRAY_DEL_ELEMENT(locks, i, br_lck->num_locks);
        br_lck->num_locks -= 1;
        br_lck->modified = True;

        /* Unlock the underlying POSIX regions. */
        if (lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_windows_flavour(br_lck->fsp,
                                                   plock->start,
                                                   plock->size,
                                                   deleted_lock_type,
                                                   &plock->context,
                                                   locks,
                                                   br_lck->num_locks);
        }

        contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct byte_range_lock *br_lck,
                             struct lock_struct *plock)
{
        unsigned int i, count;
        struct lock_struct *tp;
        struct lock_struct *locks = br_lck->lock_data;
        bool overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
            plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                unsigned int tmp_count;

                /* Only remove our own locks - ignore fnum. */
                if (!brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                if (lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(lock, plock)) {
                                TALLOC_FREE(tp);
                                return false;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

                if (tmp_count == 0) {
                        /* plock overlapped the existing lock completely,
                           or replaced it. Don't copy the existing lock. */
                        overlap_found = true;
                } else if (tmp_count == 1) {
                        /* Either no overlap, (simple copy of existing lock) or
                         * an overlap of an existing lock. */
                        /* If the lock changed size, we had an overlap. */
                        if (tp[count].size != lock->size) {
                                overlap_found = true;
                        }
                        count += tmp_count;
                } else if (tmp_count == 2) {
                        /* We split a lock range in two. */
                        overlap_found = true;
                        count += tmp_count;

                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */

                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1],
                                       sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }

        }

        if (!overlap_found) {
                /* Just ignore - no change. */
                TALLOC_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

        /* Unlock any POSIX regions. */
        if (lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_posix_flavour(br_lck->fsp,
                                                 plock->start,
                                                 plock->size,
                                                 &plock->context,
                                                 tp,
                                                 count);
        }

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                TALLOC_FREE(tp);
                tp = NULL;
        }

        contend_level2_oplocks_end(br_lck->fsp,
                                   LEVEL2_CONTEND_POSIX_BRL);

        br_lck->num_locks = count;
        TALLOC_FREE(br_lck->lock_data);
        locks = tp;
        br_lck->lock_data = tp;
        br_lck->modified = True;

        return True;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct byte_range_lock *br_lck,
                uint64_t smblctx,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return SMB_VFS_BRL_UNLOCK_WINDOWS(
                        br_lck->fsp->conn, br_lck, &lock);
        } else {
                return brl_unlock_posix(br_lck, &lock);
        }
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
                  const struct lock_struct *rw_probe)
{
        bool ret = True;
        unsigned int i;
        struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], rw_probe)) {
                        if (br_lck->record == NULL) {
                                /* readonly */
                                return false;
                        }

                        if (!serverid_exists(&locks[i].context.pid)) {
                                locks[i].context.pid.pid = 0;
                                br_lck->modified = true;
                                continue;
                        }

                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if (lp_posix_locking(fsp->conn->params) &&
            (rw_probe->lock_flav == WINDOWS_LOCK)) {
                /*
                 * Make copies -- is_posix_locked might modify the values
                 */
                br_off start = rw_probe->start;
                br_off size = rw_probe->size;
                enum brl_type lock_type = rw_probe->lock_type;

                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
                           "file %s\n", (uintmax_t)start, (uintmax_t)size,
                           ret ? "locked" : "unlocked",
                           fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
                       uint64_t *psmblctx,
                       struct server_id pid,
                       br_off *pstart,
                       br_off *psize,
                       enum brl_type *plock_type,
                       enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct lock;
        const struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smblctx = *psmblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = *pstart;
        lock.size = *psize;
        lock.fnum = fsp->fnum;
        lock.lock_type = *plock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                const struct lock_struct *exlock = &locks[i];
                bool conflict = False;

                if (exlock->lock_flav == WINDOWS_LOCK) {
                        conflict = brl_conflict(exlock, &lock);
                } else {
                        conflict = brl_conflict_posix(exlock, &lock);
                }

                if (conflict) {
                        *psmblctx = exlock->context.smblctx;
                        *pstart = exlock->start;
                        *psize = exlock->size;
                        *plock_type = exlock->lock_type;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         */

        if (lp_posix_locking(fsp->conn->params)) {
                bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

                DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
                           "file %s\n", (uintmax_t)*pstart,
                           (uintmax_t)*psize, ret ? "locked" : "unlocked",
                           fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

                if (ret) {
                        /* Hmmm. No clue what to set smblctx to - use -1. */
                        *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}
/****************************************************************************
Remove any locks associated with a open file .
2006-07-11 22:01:26 +04:00
We return True if this process owns any other Windows locks on this
fd and so we should not immediately close the fd .
2006-04-10 19:33:04 +04:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2019-07-01 16:31:04 +03:00
void brl_close_fnum ( struct byte_range_lock * br_lck )
2006-04-10 19:33:04 +04:00
{
files_struct * fsp = br_lck - > fsp ;
2012-06-06 17:28:14 +04:00
uint32_t tid = fsp - > conn - > cnum ;
2012-06-28 11:54:41 +04:00
uint64_t fnum = fsp - > fnum ;
2011-07-16 03:11:07 +04:00
unsigned int i ;
2007-05-06 00:43:06 +04:00
struct lock_struct * locks = br_lck - > lock_data ;
2011-12-15 14:50:43 +04:00
struct server_id pid = messaging_server_id ( fsp - > conn - > sconn - > msg_ctx ) ;
2011-07-16 03:11:07 +04:00
struct lock_struct * locks_copy ;
unsigned int num_locks_copy ;
/* Copy the current lock array. */
if ( br_lck - > num_locks ) {
locks_copy = ( struct lock_struct * ) talloc_memdup ( br_lck , locks , br_lck - > num_locks * sizeof ( struct lock_struct ) ) ;
if ( ! locks_copy ) {
smb_panic ( " brl_close_fnum: talloc failed " ) ;
2006-07-11 22:01:26 +04:00
}
2012-09-04 13:56:15 +04:00
} else {
2011-07-16 03:11:07 +04:00
locks_copy = NULL ;
2006-07-11 22:01:26 +04:00
}
2011-07-16 03:11:07 +04:00
num_locks_copy = br_lck - > num_locks ;
2006-07-11 22:01:26 +04:00
2011-07-16 03:11:07 +04:00
for ( i = 0 ; i < num_locks_copy ; i + + ) {
struct lock_struct * lock = & locks_copy [ i ] ;
2003-02-27 04:04:34 +03:00
2019-10-09 22:38:04 +03:00
if ( lock - > context . tid = = tid & &
server_id_equal ( & lock - > context . pid , & pid ) & &
2011-07-16 03:11:07 +04:00
( lock - > fnum = = fnum ) ) {
2019-07-01 16:28:10 +03:00
brl_unlock (
2011-07-16 03:11:07 +04:00
br_lck ,
lock - > context . smblctx ,
pid ,
lock - > start ,
lock - > size ,
lock - > lock_flav ) ;
2006-04-10 19:33:04 +04:00
}
2006-07-12 20:32:02 +04:00
}
2000-01-14 07:32:57 +03:00
}
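
/*
 * Close-path sketch (hedged; in Samba the file close code outside this
 * module drives this): fetch the locked record, drop everything this
 * (tid, pid, fnum) triple still holds, and let the destructor write the
 * result back and release the record.
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *	if (br_lck != NULL) {
 *		brl_close_fnum(br_lck);
 *		TALLOC_FREE(br_lck);	// flushes and unlocks the record
 *	}
 */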

/****************************************************************************
 Mark the byte range locks of a disconnected durable handle as disconnected,
 so they survive until the handle is reconnected or cleaned up.
****************************************************************************/

bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * As this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}

/****************************************************************************
 Re-attach the disconnected byte range locks of a durable handle to the
 reconnecting file handle.
****************************************************************************/

bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * As this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
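
/*
 * Durable-handle sketch (hedged; the real callers live in the durable
 * open/disconnect code outside this module): on disconnect the locks are
 * parked, on reconnect they are re-attached to the tid/pid/fnum of the
 * reopened handle.
 *
 *	// connection loss with a durable handle:
 *	if (!brl_mark_disconnected(fsp)) {
 *		// locks cannot be preserved for this handle
 *	}
 *
 *	// later, durable reconnect of the same persistent open:
 *	if (!brl_reconnect_disconnected(fsp)) {
 *		// stale or foreign entries found; fail the reconnect
 *	}
 */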

struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for (i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
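
/*
 * Example (hypothetical debugging helper, not part of this module): count
 * every byte range lock currently in brlock.tdb via the callback, using
 * private_data as the accumulator. The callback signature matches the
 * function pointer declared above.
 *
 *	static void count_brl(struct file_id id, struct server_id pid,
 *			      enum brl_type lock_type,
 *			      enum brl_flavour lock_flav,
 *			      br_off start, br_off size,
 *			      void *private_data)
 *	{
 *		unsigned int *count = (unsigned int *)private_data;
 *		*count += 1;
 *	}
 *
 *	unsigned int count = 0;
 *	if (brl_forall(count_brl, &count) < 0) {
 *		DEBUG(0, ("brl_forall failed\n"));
 *	}
 */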

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
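
/*
 * Compaction sketch: the autocleanup loop above removes dead-owner entries
 * by swap-with-last, so lock ordering in the record is not preserved. With
 * num_locks == 3 and locks[1] owned by a vanished process (pid.pid == 0):
 *
 *	before: [ A, dead, C ]
 *	step:   locks[1] = locks[2]; num_locks -= 1;
 *	after:  [ A, C ]
 */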

static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) != 0) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}

	return true;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for (i=0; i<br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
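
/*
 * Usage sketch (hypothetical caller; "smblctx", "self_pid", "start" and
 * "size" are placeholder names): brl_get_locks() returns the record
 * write-locked, so keep the object short-lived and let TALLOC_FREE()
 * flush and unlock via the destructor installed above.
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *	if (br_lck == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	if (!brl_unlock(br_lck, smblctx, self_pid, start, size,
 *			WINDOWS_LOCK)) {
 *		// no matching lock was found
 *	}
 *	TALLOC_FREE(br_lck);	// store/delete the record, then unlock it
 */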

/*******************************************************************
 As brl_get_locks(), but additionally attach the requester's memory
 context and lock request GUID for use while processing this request.
********************************************************************/

struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
						  files_struct *fsp,
						  TALLOC_CTX *req_mem_ctx,
						  const struct GUID *req_guid)
{
	struct byte_range_lock *br_lck = NULL;

	br_lck = brl_get_locks(mem_ctx, fsp);
	if (br_lck == NULL) {
		return NULL;
	}

	SMB_ASSERT(req_mem_ctx != NULL);
	br_lck->req_mem_ctx = req_mem_ctx;
	SMB_ASSERT(req_guid != NULL);
	br_lck->req_guid = req_guid;

	return br_lck;
}

struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};

static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) { 0 };
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}

struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */
	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}
	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
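
/*
 * Read-only usage sketch (hedged, for callers inside this module): the
 * returned object is cached on the fsp and re-validated against the dbwrap
 * sequence number, so the caller must neither modify nor TALLOC_FREE it.
 *
 *	unsigned int i;
 *	struct byte_range_lock *br_lck = brl_get_locks_readonly(fsp);
 *	if (br_lck == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	for (i = 0; i < br_lck->num_locks; i++) {
 *		print_lock_struct(i, &br_lck->lock_data[i]);
 *	}
 */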

/****************************************************************************
 Remove any byte range locks left behind by a disconnected, persistent open
 that is being cleaned up.
****************************************************************************/

bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id,
				 ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id,
			 nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num,
		  open_persistent_id);
	ret = true;
done:
	talloc_free(frame);
	return ret;
}
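
/*
 * Cleanup sketch (hedged; the real caller lives in the durable-handle
 * scavenging code, and "op" is a placeholder for its open handle): once a
 * disconnected persistent open times out, its leftover brlock record is
 * dropped wholesale, keyed by file_id and persistent open id.
 *
 *	if (!brl_cleanup_disconnected(fid,
 *				      op->global->open_persistent_id)) {
 *		DBG_WARNING("failed to clean up brlocks for disconnected "
 *			    "open %"PRIu64"\n",
 *			    op->global->open_persistent_id);
 *	}
 */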