/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			server_id_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %llu, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		(unsigned long long)pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
		      const struct lock_context *ctx2)
{
	return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}
	return True;
}
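
/*
 * Illustrative example of the overlap test above (hypothetical values, not
 * taken from real traffic): lck1 = { start=100, size=50 } covers bytes
 * 100..149 and lck2 = { start=140, size=20 } covers 140..159. Neither start
 * is >= the other range's end (140 < 150 and 100 < 160), so brl_overlap()
 * returns True. With lck2 = { start=150, size=10 } the ranges merely touch
 * end-to-start and the function returns False.
 */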

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	 * context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
		    lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
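
/*
 * Worked example for the pending-overlap test (hypothetical numbers): an
 * unlock of { start=0, size=100 } and a pending lock at { start=50, size=10 }
 * satisfy the first condition (0 <= 50 and 0 + 100 > 50), so the waiter is
 * woken. Note the second condition uses "<= pend_lock->start +
 * pend_lock->size", so an unlock starting exactly at the end of the pending
 * range still counts as an overlap here.
 */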

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
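
/*
 * Example of the special-cased offsets above (hypothetical request): a failed
 * lock at start=0xEF000000 (top bit clear) always maps to
 * NT_STATUS_FILE_LOCK_CONFLICT, while an ordinary failed lock at start=1024
 * returns NT_STATUS_LOCK_NOT_GRANTED unless it exactly repeats the previously
 * recorded failure on the same pid/tid/fnum/start, in which case it too
 * becomes NT_STATUS_FILE_LOCK_CONFLICT.
 */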

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
    struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp, plock, blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
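
/*
 * Note on the wrap check at the top of brl_lock_windows_default(), with
 * hypothetical values: a request of start=0xFFFFFFFFFFFFFFF0 and size=0x20
 * computes start + size - 1 = 0xF, which is smaller than start, so the range
 * wraps the 64-bit offset space and the lock is rejected with
 * NT_STATUS_INVALID_LOCK_RANGE. A zero-size lock never trips this check
 * because of the explicit "plock->size != 0" clause.
 */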

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						struct lock_struct *ex,		/* existing lock. */
						struct lock_struct *plock)	/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
		(plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+
**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start <= plock->start + plock->size) &&
		(ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
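
/*
 * Worked example of the split case above (hypothetical values): with an
 * existing READ lock ex = { start=0, size=100 } and a proposed WRITE lock
 * plock = { start=40, size=20 }, ex completely surrounds plock and the lock
 * types differ, so the function writes two entries into lck_arr,
 * { start=0, size=40 } and { start=60, size=40 }, and returns 2; the caller
 * then inserts plock itself separately. If both locks were READ locks, plock
 * would instead absorb ex's whole range and the function would return 0.
 */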

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		/* First entry that starts after plock - insert here. */
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				SAFE_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}

NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
						blocking_lock, blr);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

#ifdef DEVELOPER
	/* Quieten valgrind on test. */
	memset(&lock, '\0', sizeof(lock));
#endif

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
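
/*
 * Sketch of a typical call into brl_lock() (values are illustrative only): a
 * Windows-flavour exclusive lock on the first 4096 bytes of a file passes the
 * smblctx taken from the SMB request, the server_id of the calling smbd,
 * start = 0, size = 4096, lock_type = WRITE_LOCK, lock_flav = WINDOWS_LOCK,
 * and a non-NULL psmblctx so that on failure the caller learns which lock
 * context blocked the request.
 */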

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				SAFE_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}

	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}

bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
						  plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
		    br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for %s file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
2000-01-13 15:09:36 +03:00
2006-04-10 19:33:04 +04:00
/****************************************************************************
Query for existing locks .
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
NTSTATUS brl_lockquery ( struct byte_range_lock * br_lck ,
2010-05-07 17:20:50 +04:00
uint64_t * psmblctx ,
2007-05-07 13:35:35 +04:00
struct server_id pid ,
2006-04-10 19:33:04 +04:00
br_off * pstart ,
br_off * psize ,
enum brl_type * plock_type ,
enum brl_flavour lock_flav )
{
unsigned int i ;
struct lock_struct lock ;
2007-05-06 00:43:06 +04:00
const struct lock_struct * locks = br_lck - > lock_data ;
2006-04-10 19:33:04 +04:00
files_struct * fsp = br_lck - > fsp ;
2010-05-07 17:20:50 +04:00
lock . context . smblctx = * psmblctx ;
2006-04-10 19:33:04 +04:00
lock . context . pid = pid ;
lock . context . tid = br_lck - > fsp - > conn - > cnum ;
lock . start = * pstart ;
lock . size = * psize ;
lock . fnum = fsp - > fnum ;
lock . lock_type = * plock_type ;
lock . lock_flav = lock_flav ;
/* Make sure existing locks don't conflict */
for ( i = 0 ; i < br_lck - > num_locks ; i + + ) {
2006-07-15 04:05:47 +04:00
const struct lock_struct * exlock = & locks [ i ] ;
2007-10-19 04:40:25 +04:00
bool conflict = False ;
2006-04-10 19:33:04 +04:00
if ( exlock - > lock_flav = = WINDOWS_LOCK ) {
conflict = brl_conflict ( exlock , & lock ) ;
} else {
conflict = brl_conflict_posix ( exlock , & lock ) ;
}
if ( conflict ) {
2010-05-07 17:20:50 +04:00
* psmblctx = exlock - > context . smblctx ;
2006-04-10 19:33:04 +04:00
* pstart = exlock - > start ;
* psize = exlock - > size ;
* plock_type = exlock - > lock_type ;
return NT_STATUS_LOCK_NOT_GRANTED ;
}
}
/*
* There is no lock held by an SMB daemon , check to
* see if there is a POSIX lock from a UNIX or NFS process .
*/
2006-11-11 20:05:11 +03:00
if ( lp_posix_locking ( fsp - > conn - > params ) ) {
2007-10-19 04:40:25 +04:00
bool ret = is_posix_locked ( fsp , pstart , psize , plock_type , POSIX_LOCK ) ;
2006-04-10 19:33:04 +04:00
2012-06-14 14:14:32 +04:00
DEBUG ( 10 , ( " brl_lockquery: posix start=%.0f len=%.0f %s for %s file %s \n " ,
2006-04-10 19:33:04 +04:00
( double ) * pstart , ( double ) * psize , ret ? " locked " : " unlocked " ,
2012-06-14 14:14:32 +04:00
fsp_fnum_dbg ( fsp ) , fsp_str_dbg ( fsp ) ) ) ;
2006-04-10 19:33:04 +04:00
if ( ret ) {
2010-05-07 17:20:50 +04:00
/* Hmmm. No clue what to set smblctx to - use -1. */
* psmblctx = 0xFFFFFFFFFFFFFFFFLL ;
2006-04-10 19:33:04 +04:00
return NT_STATUS_LOCK_NOT_GRANTED ;
}
}
return NT_STATUS_OK ;
2000-01-13 15:09:36 +03:00
}
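/*
 * Example query (a minimal sketch with a hypothetical helper): reporting
 * the first conflicting lock for a proposed range, the way an SMB lock
 * query would.  On conflict brl_lockquery() writes the holder's context
 * and range back through the pointer arguments, as shown below.
 */
#if 0
static void example_report_conflict(files_struct *fsp, uint64_t smblctx)
{
	struct server_id self =
		messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = brl_get_locks_readonly(fsp);
	uint64_t ctx = smblctx;
	br_off start = 0;
	br_off size = 0x10000;
	enum brl_type lock_type = WRITE_LOCK;
	NTSTATUS status;

	if (br_lck == NULL) {
		return;
	}
	status = brl_lockquery(br_lck, &ctx, self, &start, &size,
			       &lock_type, WINDOWS_LOCK);
	if (NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		/* The holder's smblctx/start/size were written back. */
		DEBUG(10, ("example: conflict at start=%.0f size=%.0f\n",
			   (double)start, (double)size));
	}
}
#endif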
2000-01-14 07:32:57 +03:00
2009-07-24 04:28:58 +04:00
bool smb_vfs_call_brl_cancel_windows ( struct vfs_handle_struct * handle ,
struct byte_range_lock * br_lck ,
struct lock_struct * plock ,
struct blocking_lock_record * blr )
{
VFS_FIND ( brl_cancel_windows ) ;
2011-12-04 08:45:04 +04:00
return handle - > fns - > brl_cancel_windows_fn ( handle , br_lck , plock , blr ) ;
2009-07-24 04:28:58 +04:00
}
2000-01-14 07:32:57 +03:00
/****************************************************************************
2006-04-10 19:33:04 +04:00
Remove a particular pending lock .
2000-01-14 07:32:57 +03:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2007-10-19 04:40:25 +04:00
bool brl_lock_cancel ( struct byte_range_lock * br_lck ,
2010-05-07 17:20:50 +04:00
uint64_t smblctx ,
2007-05-07 13:35:35 +04:00
struct server_id pid ,
2006-04-10 19:33:04 +04:00
br_off start ,
br_off size ,
2009-02-10 08:51:29 +03:00
enum brl_flavour lock_flav ,
struct blocking_lock_record * blr )
{
bool ret ;
struct lock_struct lock ;
2010-05-07 17:20:50 +04:00
lock . context . smblctx = smblctx ;
2009-02-10 08:51:29 +03:00
lock . context . pid = pid ;
lock . context . tid = br_lck - > fsp - > conn - > cnum ;
lock . start = start ;
lock . size = size ;
lock . fnum = br_lck - > fsp - > fnum ;
lock . lock_flav = lock_flav ;
/* lock.lock_type doesn't matter */
if ( lock_flav = = WINDOWS_LOCK ) {
ret = SMB_VFS_BRL_CANCEL_WINDOWS ( br_lck - > fsp - > conn , br_lck ,
& lock , blr ) ;
} else {
ret = brl_lock_cancel_default ( br_lck , & lock ) ;
}
return ret ;
}
bool brl_lock_cancel_default ( struct byte_range_lock * br_lck ,
struct lock_struct * plock )
2000-01-14 07:32:57 +03:00
{
2006-04-10 19:33:04 +04:00
unsigned int i ;
2007-05-06 00:43:06 +04:00
struct lock_struct * locks = br_lck - > lock_data ;
2006-04-10 19:33:04 +04:00
2009-02-10 08:51:29 +03:00
SMB_ASSERT ( plock ) ;
2006-04-10 19:33:04 +04:00
for ( i = 0 ; i < br_lck - > num_locks ; i + + ) {
struct lock_struct * lock = & locks [ i ] ;
/* For pending locks we *always* care about the fnum. */
2009-02-10 08:51:29 +03:00
if ( brl_same_context ( & lock - > context , & plock - > context ) & &
lock - > fnum = = plock - > fnum & &
2006-07-29 23:14:24 +04:00
IS_PENDING_LOCK ( lock - > lock_type ) & &
2009-02-10 08:51:29 +03:00
lock - > lock_flav = = plock - > lock_flav & &
lock - > start = = plock - > start & &
lock - > size = = plock - > size ) {
2006-04-10 19:33:04 +04:00
break ;
}
}
2000-01-14 07:32:57 +03:00
2006-04-10 19:33:04 +04:00
if ( i = = br_lck - > num_locks ) {
/* Didn't find it. */
return False ;
}
2000-01-14 07:32:57 +03:00
2006-04-10 19:33:04 +04:00
if ( i < br_lck - > num_locks - 1 ) {
/* Found this particular pending lock - delete it */
memmove ( & locks [ i ] , & locks [ i + 1 ] ,
sizeof ( * locks ) * ( ( br_lck - > num_locks - 1 ) - i ) ) ;
}
2000-01-14 07:32:57 +03:00
2006-04-10 19:33:04 +04:00
br_lck - > num_locks - = 1 ;
br_lck - > modified = True ;
return True ;
}
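/*
 * brl_lock_cancel_default() above drops the matching entry with the usual
 * memmove()-based in-place delete.  The same idiom is sketched on its own
 * below (hypothetical helper, for illustration only).
 */
#if 0
static void example_delete_entry(struct lock_struct *locks,
				 unsigned int *pnum_locks,
				 unsigned int i)
{
	/* Shift the tail of the array down over element i... */
	if (i < *pnum_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks) * ((*pnum_locks - 1) - i));
	}
	/* ...and shrink the logical length by one. */
	*pnum_locks -= 1;
}
#endif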
2000-01-14 07:32:57 +03:00
2006-04-10 19:33:04 +04:00
/****************************************************************************
Remove any locks associated with an open file .
2006-07-11 22:01:26 +04:00
Every lock owned by this process on the fd is removed here ;
the function itself returns nothing .
2006-04-10 19:33:04 +04:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2007-05-14 17:01:28 +04:00
void brl_close_fnum ( struct messaging_context * msg_ctx ,
struct byte_range_lock * br_lck )
2006-04-10 19:33:04 +04:00
{
files_struct * fsp = br_lck - > fsp ;
2012-06-06 17:28:14 +04:00
uint32_t tid = fsp - > conn - > cnum ;
2012-06-28 11:54:41 +04:00
uint64_t fnum = fsp - > fnum ;
2011-07-16 03:11:07 +04:00
unsigned int i ;
2007-05-06 00:43:06 +04:00
struct lock_struct * locks = br_lck - > lock_data ;
2011-12-15 14:50:43 +04:00
struct server_id pid = messaging_server_id ( fsp - > conn - > sconn - > msg_ctx ) ;
2011-07-16 03:11:07 +04:00
struct lock_struct * locks_copy ;
unsigned int num_locks_copy ;
/* Copy the current lock array. */
if ( br_lck - > num_locks ) {
locks_copy = ( struct lock_struct * ) talloc_memdup ( br_lck , locks , br_lck - > num_locks * sizeof ( struct lock_struct ) ) ;
if ( ! locks_copy ) {
smb_panic ( " brl_close_fnum: talloc failed " ) ;
2006-07-11 22:01:26 +04:00
}
2012-09-04 13:56:15 +04:00
} else {
2011-07-16 03:11:07 +04:00
locks_copy = NULL ;
2006-07-11 22:01:26 +04:00
}
2011-07-16 03:11:07 +04:00
num_locks_copy = br_lck - > num_locks ;
2006-07-11 22:01:26 +04:00
2011-07-16 03:11:07 +04:00
for ( i = 0 ; i < num_locks_copy ; i + + ) {
struct lock_struct * lock = & locks_copy [ i ] ;
2003-02-27 04:04:34 +03:00
2012-06-16 02:26:26 +04:00
if ( lock - > context . tid = = tid & & serverid_equal ( & lock - > context . pid , & pid ) & &
2011-07-16 03:11:07 +04:00
( lock - > fnum = = fnum ) ) {
brl_unlock ( msg_ctx ,
br_lck ,
lock - > context . smblctx ,
pid ,
lock - > start ,
lock - > size ,
lock - > lock_flav ) ;
2006-04-10 19:33:04 +04:00
}
2006-07-12 20:32:02 +04:00
}
2000-01-14 07:32:57 +03:00
}
2000-01-16 14:14:44 +03:00
2012-06-30 23:48:43 +04:00
bool brl_mark_disconnected ( struct files_struct * fsp )
{
uint32_t tid = fsp - > conn - > cnum ;
uint64_t smblctx = fsp - > op - > global - > open_persistent_id ;
uint64_t fnum = fsp - > fnum ;
unsigned int i ;
struct server_id self = messaging_server_id ( fsp - > conn - > sconn - > msg_ctx ) ;
struct byte_range_lock * br_lck = NULL ;
if ( ! fsp - > op - > global - > durable ) {
return false ;
}
if ( fsp - > current_lock_count = = 0 ) {
return true ;
}
br_lck = brl_get_locks ( talloc_tos ( ) , fsp ) ;
if ( br_lck = = NULL ) {
return false ;
}
for ( i = 0 ; i < br_lck - > num_locks ; i + + ) {
struct lock_struct * lock = & br_lck - > lock_data [ i ] ;
/*
* as this is a durable handle , we only expect locks
* of the current file handle !
*/
if ( lock - > context . smblctx ! = smblctx ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
if ( lock - > context . tid ! = tid ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
if ( ! serverid_equal ( & lock - > context . pid , & self ) ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
if ( lock - > fnum ! = fnum ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
server_id_set_disconnected ( & lock - > context . pid ) ;
lock - > context . tid = TID_FIELD_INVALID ;
lock - > fnum = FNUM_FIELD_INVALID ;
}
br_lck - > modified = true ;
TALLOC_FREE ( br_lck ) ;
return true ;
}
bool brl_reconnect_disconnected ( struct files_struct * fsp )
{
uint32_t tid = fsp - > conn - > cnum ;
uint64_t smblctx = fsp - > op - > global - > open_persistent_id ;
uint64_t fnum = fsp - > fnum ;
unsigned int i ;
struct server_id self = messaging_server_id ( fsp - > conn - > sconn - > msg_ctx ) ;
struct byte_range_lock * br_lck = NULL ;
if ( ! fsp - > op - > global - > durable ) {
return false ;
}
/* we want to validate ourselves */
fsp - > lockdb_clean = true ;
br_lck = brl_get_locks ( talloc_tos ( ) , fsp ) ;
if ( br_lck = = NULL ) {
return false ;
}
if ( br_lck - > num_locks = = 0 ) {
TALLOC_FREE ( br_lck ) ;
return true ;
}
for ( i = 0 ; i < br_lck - > num_locks ; i + + ) {
struct lock_struct * lock = & br_lck - > lock_data [ i ] ;
/*
* as this is a durable handle we only expect locks
* of the current file handle !
*/
if ( lock - > context . smblctx ! = smblctx ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
if ( lock - > context . tid ! = TID_FIELD_INVALID ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
if ( ! server_id_is_disconnected ( & lock - > context . pid ) ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
if ( lock - > fnum ! = FNUM_FIELD_INVALID ) {
TALLOC_FREE ( br_lck ) ;
return false ;
}
lock - > context . pid = self ;
lock - > context . tid = tid ;
lock - > fnum = fnum ;
}
fsp - > current_lock_count = br_lck - > num_locks ;
br_lck - > modified = true ;
TALLOC_FREE ( br_lck ) ;
return true ;
}
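/*
 * How the two functions above are meant to pair up, sketched under the
 * simplifying assumption that the same files_struct is reused; in practice
 * the reconnect step runs on a freshly created fsp when a durable handle is
 * reclaimed.  The helper is hypothetical.
 */
#if 0
static bool example_durable_cycle(files_struct *fsp)
{
	/* On disconnect: keep the entries but stamp them as disconnected. */
	if (!brl_mark_disconnected(fsp)) {
		return false;	/* the byte range locks cannot be preserved */
	}

	/* ... client disconnects, later reconnects the durable handle ... */

	/* On reconnect: re-own the preserved entries under the new ids. */
	return brl_reconnect_disconnected(fsp);
}
#endif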
2000-01-16 14:14:44 +03:00
/****************************************************************************
2006-05-03 20:07:21 +04:00
Ensure this set of lock entries is valid .
2000-01-16 14:14:44 +03:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2007-10-19 04:40:25 +04:00
static bool validate_lock_entries ( unsigned int * pnum_entries , struct lock_struct * * pplocks )
2000-01-16 14:14:44 +03:00
{
2006-05-03 06:14:09 +04:00
unsigned int i ;
unsigned int num_valid_entries = 0 ;
2006-05-03 20:07:21 +04:00
struct lock_struct * locks = * pplocks ;
2000-01-16 14:14:44 +03:00
2006-05-03 20:07:21 +04:00
for ( i = 0 ; i < * pnum_entries ; i + + ) {
2006-05-03 06:14:09 +04:00
struct lock_struct * lock_data = & locks [ i ] ;
s3: Fix a long-standing problem with recycled PIDs
When a samba server process dies hard, it has no chance to clean up its entries
in locking.tdb, brlock.tdb, connections.tdb and sessionid.tdb.
For locking.tdb and brlock.tdb Samba is robust by checking every time we read
an entry from the database if the corresponding process still exists. If it
does not exist anymore, the entry is deleted. This is not 100% failsafe though:
On systems with a limited PID space there is a non-zero chance that between the
smbd's death and the fresh access, the PID is recycled by another long-running
process. This renders all files that had been locked by the killed smbd
potentially unusable until the new process also dies.
This patch is supposed to fix the problem the following way: Every process ID
in every database is augmented by a random 64-bit number that is stored in a
serverid.tdb. Whenever we need to check if a process still exists we know its
PID and the 64-bit number. We look up the PID in serverid.tdb and compare the
64-bit number. If it's the same, the process still is a valid smbd holding the
lock. If it is different, a new smbd has taken over.
I believe this is safe against an smbd that has died hard and the PID has been
taken over by a non-samba process. This process would not have registered
itself with a fresh 64-bit number in serverid.tdb, so the old one still exists
in serverid.tdb. We protect against this case by the parent smbd taking care of
deregistering PIDs from serverid.tdb and the fact that serverid.tdb is
CLEAR_IF_FIRST.
CLEAR_IF_FIRST does not work in a cluster, so the automatic cleanup does not
work when all smbds are restarted. For this, "net serverid wipe" has to be run
before smbd starts up. As a convenience, "net serverid wipedbs" also cleans up
sessionid.tdb and connections.tdb.
While there, this also cleans up overloading connections.tdb with all the
process entries just for messaging_send_all().
Volker
2010-03-02 19:02:01 +03:00
if ( ! serverid_exists ( & lock_data - > context . pid ) ) {
2006-05-03 06:14:09 +04:00
/* This process no longer exists - mark this
entry as invalid by zeroing it . */
ZERO_STRUCTP ( lock_data ) ;
} else {
num_valid_entries + + ;
}
}
2006-05-03 20:07:21 +04:00
if ( num_valid_entries ! = * pnum_entries ) {
2006-05-03 06:14:09 +04:00
struct lock_struct * new_lock_data = NULL ;
if ( num_valid_entries ) {
new_lock_data = SMB_MALLOC_ARRAY ( struct lock_struct , num_valid_entries ) ;
if ( ! new_lock_data ) {
DEBUG ( 3 , ( " malloc fail \n " ) ) ;
2006-05-03 20:07:21 +04:00
return False ;
2006-05-03 06:14:09 +04:00
}
2006-05-03 20:07:21 +04:00
2006-05-03 06:14:09 +04:00
num_valid_entries = 0 ;
2006-05-03 20:07:21 +04:00
for ( i = 0 ; i < * pnum_entries ; i + + ) {
2006-05-03 06:14:09 +04:00
struct lock_struct * lock_data = & locks [ i ] ;
2010-05-07 17:20:50 +04:00
if ( lock_data - > context . smblctx & &
2006-05-03 06:14:09 +04:00
lock_data - > context . tid ) {
/* Valid (nonzero) entry - copy it. */
memcpy ( & new_lock_data [ num_valid_entries ] ,
lock_data , sizeof ( struct lock_struct ) ) ;
num_valid_entries + + ;
}
}
}
2006-05-03 20:07:21 +04:00
SAFE_FREE ( * pplocks ) ;
* pplocks = new_lock_data ;
* pnum_entries = num_valid_entries ;
}
return True ;
}
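/*
 * A minimal sketch of the stale-entry test described in the commit message
 * above: serverid_exists() compares both the PID and the unique 64-bit id
 * registered in serverid.tdb, so a recycled PID belonging to an unrelated
 * process does not keep a lock entry alive.  The helper is hypothetical.
 */
#if 0
static bool example_entry_is_stale(const struct lock_struct *lock)
{
	return !serverid_exists(&lock->context.pid);
}
#endif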
2007-05-29 17:26:44 +04:00
struct brl_forall_cb {
void ( * fn ) ( struct file_id id , struct server_id pid ,
enum brl_type lock_type ,
enum brl_flavour lock_flav ,
br_off start , br_off size ,
void * private_data ) ;
void * private_data ;
} ;
2006-05-03 20:07:21 +04:00
/****************************************************************************
Traverse the whole database with this function , calling the given
callback on each lock .
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2012-08-08 19:46:59 +04:00
static int brl_traverse_fn ( struct db_record * rec , void * state )
2006-05-03 20:07:21 +04:00
{
2007-05-29 17:26:44 +04:00
struct brl_forall_cb * cb = ( struct brl_forall_cb * ) state ;
2006-05-03 20:07:21 +04:00
struct lock_struct * locks ;
2007-05-29 13:30:34 +04:00
struct file_id * key ;
2006-05-03 20:07:21 +04:00
unsigned int i ;
unsigned int num_locks = 0 ;
unsigned int orig_num_locks = 0 ;
2011-08-17 12:53:58 +04:00
TDB_DATA dbkey ;
TDB_DATA value ;
dbkey = dbwrap_record_get_key ( rec ) ;
value = dbwrap_record_get_value ( rec ) ;
2006-05-03 20:07:21 +04:00
/* In a traverse function we must make a copy of
the record value before modifying it . */
2011-08-17 12:53:58 +04:00
locks = ( struct lock_struct * ) memdup ( value . dptr , value . dsize ) ;
2006-05-03 20:07:21 +04:00
if ( ! locks ) {
return - 1 ; /* Terminate traversal. */
}
2011-08-17 12:53:58 +04:00
key = ( struct file_id * ) dbkey . dptr ;
orig_num_locks = num_locks = value . dsize / sizeof ( * locks ) ;
2006-05-03 20:07:21 +04:00
/* Ensure the lock db is clean of entries from invalid processes. */
if ( ! validate_lock_entries ( & num_locks , & locks ) ) {
SAFE_FREE ( locks ) ;
return - 1 ; /* Terminate traversal */
}
if ( orig_num_locks ! = num_locks ) {
2007-05-27 21:12:08 +04:00
if ( num_locks ) {
TDB_DATA data ;
data . dptr = ( uint8_t * ) locks ;
data . dsize = num_locks * sizeof ( struct lock_struct ) ;
2011-08-17 12:53:58 +04:00
dbwrap_record_store ( rec , data , TDB_REPLACE ) ;
2006-05-03 06:14:09 +04:00
} else {
2011-08-17 12:53:58 +04:00
dbwrap_record_delete ( rec ) ;
2006-05-03 06:14:09 +04:00
}
}
2008-01-16 12:09:48 +03:00
if ( cb - > fn ) {
for ( i = 0 ; i < num_locks ; i + + ) {
cb - > fn ( * key ,
locks [ i ] . context . pid ,
locks [ i ] . lock_type ,
locks [ i ] . lock_flav ,
locks [ i ] . start ,
locks [ i ] . size ,
cb - > private_data ) ;
}
2000-01-16 14:14:44 +03:00
}
2006-05-03 20:07:21 +04:00
SAFE_FREE ( locks ) ;
2000-01-16 14:14:44 +03:00
return 0 ;
}
/*******************************************************************
2000-04-26 00:30:58 +04:00
Call the specified function on each lock in the database .
2000-01-16 14:14:44 +03:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2000-04-26 00:30:58 +04:00
2007-05-29 17:26:44 +04:00
int brl_forall ( void ( * fn ) ( struct file_id id , struct server_id pid ,
enum brl_type lock_type ,
enum brl_flavour lock_flav ,
br_off start , br_off size ,
void * private_data ) ,
void * private_data )
2000-01-16 14:14:44 +03:00
{
2007-05-29 17:26:44 +04:00
struct brl_forall_cb cb ;
2011-08-17 12:53:58 +04:00
NTSTATUS status ;
int count = 0 ;
2007-05-29 17:26:44 +04:00
2007-05-27 14:35:14 +04:00
if ( ! brlock_db ) {
2006-04-10 19:33:04 +04:00
return 0 ;
}
2007-05-29 17:26:44 +04:00
cb . fn = fn ;
cb . private_data = private_data ;
2012-08-08 19:46:59 +04:00
status = dbwrap_traverse ( brlock_db , brl_traverse_fn , & cb , & count ) ;
2011-08-17 12:53:58 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
return - 1 ;
} else {
return count ;
}
2000-01-16 14:14:44 +03:00
}
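/*
 * Example traversal (a minimal sketch, hypothetical helper names): counting
 * WRITE_LOCK entries across the whole database with brl_forall().  The
 * callback signature matches the one brl_forall() expects above.
 */
#if 0
static void example_count_write_locks(struct file_id id, struct server_id pid,
				      enum brl_type lock_type,
				      enum brl_flavour lock_flav,
				      br_off start, br_off size,
				      void *private_data)
{
	unsigned int *count = (unsigned int *)private_data;

	if (lock_type == WRITE_LOCK) {
		*count += 1;
	}
}

static unsigned int example_total_write_locks(void)
{
	unsigned int count = 0;

	/* brl_forall() returns the number of records traversed, or -1. */
	if (brl_forall(example_count_write_locks, &count) == -1) {
		return 0;
	}
	return count;
}
#endif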
2006-04-10 19:33:04 +04:00
/*******************************************************************
Store a potentially modified set of byte range lock data back into
the database .
Unlock the record .
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2011-03-17 12:04:12 +03:00
static void byte_range_lock_flush ( struct byte_range_lock * br_lck )
2006-04-10 19:33:04 +04:00
{
2006-08-01 00:58:02 +04:00
if ( br_lck - > read_only ) {
SMB_ASSERT ( ! br_lck - > modified ) ;
}
2006-04-10 19:33:04 +04:00
if ( ! br_lck - > modified ) {
goto done ;
}
if ( br_lck - > num_locks = = 0 ) {
/* No locks - delete this entry. */
2011-08-17 12:53:58 +04:00
NTSTATUS status = dbwrap_record_delete ( br_lck - > record ) ;
2007-05-27 14:35:14 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " delete_rec returned %s \n " ,
nt_errstr ( status ) ) ) ;
2007-06-16 01:58:49 +04:00
smb_panic ( " Could not delete byte range lock entry " ) ;
2006-04-10 19:33:04 +04:00
}
} else {
TDB_DATA data ;
2007-05-27 14:35:14 +04:00
NTSTATUS status ;
2007-03-29 13:35:51 +04:00
data . dptr = ( uint8 * ) br_lck - > lock_data ;
2006-04-10 19:33:04 +04:00
data . dsize = br_lck - > num_locks * sizeof ( struct lock_struct ) ;
2011-08-17 12:53:58 +04:00
status = dbwrap_record_store ( br_lck - > record , data , TDB_REPLACE ) ;
2007-05-27 14:35:14 +04:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
DEBUG ( 0 , ( " store returned %s \n " , nt_errstr ( status ) ) ) ;
2007-06-16 01:58:49 +04:00
smb_panic ( " Could not store byte range mode entry " ) ;
2006-04-10 19:33:04 +04:00
}
}
done :
2011-03-17 12:04:12 +03:00
br_lck - > read_only = true ;
br_lck - > modified = false ;
2007-05-27 14:35:14 +04:00
TALLOC_FREE ( br_lck - > record ) ;
2011-03-17 12:04:12 +03:00
}
static int byte_range_lock_destructor ( struct byte_range_lock * br_lck )
{
byte_range_lock_flush ( br_lck ) ;
SAFE_FREE ( br_lck - > lock_data ) ;
2006-04-10 19:33:04 +04:00
return 0 ;
}
/*******************************************************************
Fetch a set of byte range lock data from the database .
Leave the record locked .
2006-07-11 22:01:26 +04:00
TALLOC_FREE ( brl ) will release the lock in the destructor .
2006-04-10 19:33:04 +04:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2006-08-01 00:58:02 +04:00
static struct byte_range_lock * brl_get_locks_internal ( TALLOC_CTX * mem_ctx ,
2007-10-19 04:40:25 +04:00
files_struct * fsp , bool read_only )
2006-04-10 19:33:04 +04:00
{
2007-05-27 14:35:14 +04:00
TDB_DATA key , data ;
2011-06-07 05:38:41 +04:00
struct byte_range_lock * br_lck = talloc ( mem_ctx , struct byte_range_lock ) ;
2011-03-17 12:04:12 +03:00
bool do_read_only = read_only ;
2006-04-10 19:33:04 +04:00
if ( br_lck = = NULL ) {
return NULL ;
}
br_lck - > fsp = fsp ;
br_lck - > num_locks = 0 ;
br_lck - > modified = False ;
2007-05-29 13:30:34 +04:00
br_lck - > key = fsp - > file_id ;
2006-04-13 03:00:58 +04:00
2007-03-29 13:35:51 +04:00
key . dptr = ( uint8 * ) & br_lck - > key ;
2007-05-29 13:30:34 +04:00
key . dsize = sizeof ( struct file_id ) ;
2006-04-10 19:33:04 +04:00
2006-08-01 00:58:02 +04:00
if ( ! fsp - > lockdb_clean ) {
/* We must be read/write to clean
the dead entries . */
2011-03-17 12:04:12 +03:00
do_read_only = false ;
2006-08-01 00:58:02 +04:00
}
2011-03-17 12:04:12 +03:00
if ( do_read_only ) {
2011-08-17 12:53:58 +04:00
NTSTATUS status ;
status = dbwrap_fetch ( brlock_db , br_lck , key , & data ) ;
if ( ! NT_STATUS_IS_OK ( status ) ) {
2007-05-27 14:35:14 +04:00
DEBUG ( 3 , ( " Could not fetch byte range lock record \n " ) ) ;
TALLOC_FREE ( br_lck ) ;
return NULL ;
}
br_lck - > record = NULL ;
2011-03-17 12:04:12 +03:00
} else {
2011-08-17 12:53:58 +04:00
br_lck - > record = dbwrap_fetch_locked ( brlock_db , br_lck , key ) ;
2007-05-27 14:35:14 +04:00
if ( br_lck - > record = = NULL ) {
2006-08-01 00:58:02 +04:00
DEBUG ( 3 , ( " Could not lock byte range lock entry \n " ) ) ;
TALLOC_FREE ( br_lck ) ;
return NULL ;
}
2007-05-27 14:35:14 +04:00
2011-08-17 12:53:58 +04:00
data = dbwrap_record_get_value ( br_lck - > record ) ;
2006-04-10 19:33:04 +04:00
}
2011-03-17 12:04:12 +03:00
br_lck - > read_only = do_read_only ;
2007-07-16 12:23:20 +04:00
br_lck - > lock_data = NULL ;
2007-05-27 14:35:14 +04:00
2006-07-11 22:01:26 +04:00
talloc_set_destructor ( br_lck , byte_range_lock_destructor ) ;
2006-04-10 19:33:04 +04:00
br_lck - > num_locks = data . dsize / sizeof ( struct lock_struct ) ;
2007-07-09 11:51:39 +04:00
2007-07-16 11:40:30 +04:00
if ( br_lck - > num_locks ! = 0 ) {
br_lck - > lock_data = SMB_MALLOC_ARRAY ( struct lock_struct ,
br_lck - > num_locks ) ;
if ( br_lck - > lock_data = = NULL ) {
DEBUG ( 0 , ( " malloc failed \n " ) ) ;
TALLOC_FREE ( br_lck ) ;
return NULL ;
}
memcpy ( br_lck - > lock_data , data . dptr , data . dsize ) ;
}
2011-05-28 12:24:20 +04:00
2006-05-03 06:14:09 +04:00
if ( ! fsp - > lockdb_clean ) {
2007-06-16 05:04:22 +04:00
int orig_num_locks = br_lck - > num_locks ;
2006-05-03 06:14:09 +04:00
/* This is the first time we've accessed this. */
/* Go through and ensure all entries exist - remove any that don't. */
/* Makes the lockdb self cleaning at low cost. */
2007-05-06 00:43:06 +04:00
if ( ! validate_lock_entries ( & br_lck - > num_locks ,
& br_lck - > lock_data ) ) {
2006-05-03 06:14:09 +04:00
SAFE_FREE ( br_lck - > lock_data ) ;
2006-07-11 22:01:26 +04:00
TALLOC_FREE ( br_lck ) ;
2006-05-03 20:07:21 +04:00
return NULL ;
2006-05-03 06:14:09 +04:00
}
2007-06-16 05:04:22 +04:00
/* Ensure invalid locks are cleaned up in the destructor. */
if ( orig_num_locks ! = br_lck - > num_locks ) {
br_lck - > modified = True ;
}
2006-05-03 06:14:09 +04:00
/* Mark the lockdb as "clean" as seen from this open file. */
fsp - > lockdb_clean = True ;
}
2006-04-10 19:33:04 +04:00
if ( DEBUGLEVEL > = 10 ) {
unsigned int i ;
2007-05-06 00:43:06 +04:00
struct lock_struct * locks = br_lck - > lock_data ;
2007-05-29 13:30:34 +04:00
DEBUG ( 10 , ( " brl_get_locks_internal: %u current locks on file_id %s \n " ,
2006-04-10 19:33:04 +04:00
br_lck - > num_locks ,
2007-09-10 14:56:07 +04:00
file_id_string_tos ( & fsp - > file_id ) ) ) ;
2006-04-10 19:33:04 +04:00
for ( i = 0 ; i < br_lck - > num_locks ; i + + ) {
print_lock_struct ( i , & locks [ i ] ) ;
}
}
2011-03-17 12:04:12 +03:00
if ( do_read_only ! = read_only ) {
/*
* this stores the record and gets rid of
* the write lock that is needed for a cleanup
*/
byte_range_lock_flush ( br_lck ) ;
}
2006-04-10 19:33:04 +04:00
return br_lck ;
}
2006-08-01 00:58:02 +04:00
struct byte_range_lock * brl_get_locks ( TALLOC_CTX * mem_ctx ,
files_struct * fsp )
{
return brl_get_locks_internal ( mem_ctx , fsp , False ) ;
}
2009-11-16 11:40:47 +03:00
struct byte_range_lock * brl_get_locks_readonly ( files_struct * fsp )
2006-08-01 00:58:02 +04:00
{
2009-11-16 11:40:47 +03:00
struct byte_range_lock * br_lock ;
if ( lp_clustering ( ) ) {
return brl_get_locks_internal ( talloc_tos ( ) , fsp , true ) ;
}
if ( ( fsp - > brlock_rec ! = NULL )
2011-08-17 12:53:58 +04:00
& & ( dbwrap_get_seqnum ( brlock_db ) = = fsp - > brlock_seqnum ) ) {
2009-11-16 11:40:47 +03:00
return fsp - > brlock_rec ;
}
TALLOC_FREE ( fsp - > brlock_rec ) ;
2011-03-17 12:08:56 +03:00
br_lock = brl_get_locks_internal ( talloc_tos ( ) , fsp , true ) ;
2009-11-16 11:40:47 +03:00
if ( br_lock = = NULL ) {
return NULL ;
}
2011-08-17 12:53:58 +04:00
fsp - > brlock_seqnum = dbwrap_get_seqnum ( brlock_db ) ;
2009-11-16 11:40:47 +03:00
2011-03-17 12:08:56 +03:00
fsp - > brlock_rec = talloc_move ( fsp , & br_lock ) ;
2009-11-16 11:40:47 +03:00
return fsp - > brlock_rec ;
2006-08-01 00:58:02 +04:00
}
2007-05-29 18:49:19 +04:00
struct brl_revalidate_state {
ssize_t array_size ;
uint32 num_pids ;
struct server_id * pids ;
} ;
/*
* Collect PIDs of all processes with pending entries
*/
static void brl_revalidate_collect ( struct file_id id , struct server_id pid ,
enum brl_type lock_type ,
enum brl_flavour lock_flav ,
br_off start , br_off size ,
void * private_data )
{
struct brl_revalidate_state * state =
( struct brl_revalidate_state * ) private_data ;
if ( ! IS_PENDING_LOCK ( lock_type ) ) {
return ;
}
add_to_large_array ( state , sizeof ( pid ) , ( void * ) & pid ,
& state - > pids , & state - > num_pids ,
& state - > array_size ) ;
}
/*
* qsort callback to sort the processes
*/
static int compare_procids ( const void * p1 , const void * p2 )
{
2011-05-06 01:22:11 +04:00
const struct server_id * i1 = ( const struct server_id * ) p1 ;
const struct server_id * i2 = ( const struct server_id * ) p2 ;
2007-05-29 18:49:19 +04:00
if ( i1 - > pid < i2 - > pid ) return - 1 ;
if ( i1 - > pid > i2 - > pid ) return 1 ;
return 0 ;
}
/*
* Send a MSG_SMB_UNLOCK message to all processes with pending byte range
* locks so that they retry . Mainly used in the cluster code after a node has
* died .
*
* Done in two steps to avoid double - sends : First we collect all entries in an
* array , then qsort that array and only send to non - dupes .
*/
2011-12-13 12:24:31 +04:00
void brl_revalidate ( struct messaging_context * msg_ctx ,
void * private_data ,
uint32_t msg_type ,
struct server_id server_id ,
DATA_BLOB * data )
2007-05-29 18:49:19 +04:00
{
struct brl_revalidate_state * state ;
uint32 i ;
struct server_id last_pid ;
2011-06-07 05:44:43 +04:00
if ( ! ( state = talloc_zero ( NULL , struct brl_revalidate_state ) ) ) {
2007-05-29 18:49:19 +04:00
DEBUG ( 0 , ( " talloc failed \n " ) ) ;
return ;
}
brl_forall ( brl_revalidate_collect , state ) ;
if ( state - > array_size = = - 1 ) {
DEBUG ( 0 , ( " talloc failed \n " ) ) ;
goto done ;
}
if ( state - > num_pids = = 0 ) {
goto done ;
}
2010-02-14 02:02:35 +03:00
TYPESAFE_QSORT ( state - > pids , state - > num_pids , compare_procids ) ;
2007-05-29 18:49:19 +04:00
ZERO_STRUCT ( last_pid ) ;
for ( i = 0 ; i < state - > num_pids ; i + + ) {
2012-06-16 02:26:26 +04:00
if ( serverid_equal ( & last_pid , & state - > pids [ i ] ) ) {
2007-05-29 18:49:19 +04:00
/*
* We ' ve seen that one already
*/
continue ;
}
messaging_send ( msg_ctx , state - > pids [ i ] , MSG_SMB_UNLOCK ,
& data_blob_null ) ;
last_pid = state - > pids [ i ] ;
}
done :
TALLOC_FREE ( state ) ;
return ;
}