2009-10-25 18:12:12 +03:00
/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
2019-10-10 17:30:14 +03:00
# include "replace.h"
2011-02-26 01:20:06 +03:00
# include "system/filesys.h"
2017-01-01 23:00:55 +03:00
# include "lib/util/server_id.h"
2019-10-10 17:30:14 +03:00
# include "lib/util/debug.h"
# include "lib/util/talloc_stack.h"
# include "lib/util/samba_util.h"
# include "lib/util_path.h"
2011-07-07 19:42:08 +04:00
# include "dbwrap/dbwrap.h"
2011-07-06 18:40:21 +04:00
# include "dbwrap/dbwrap_open.h"
2012-02-15 19:38:43 +04:00
# include "dbwrap/dbwrap_watch.h"
2009-10-25 18:12:12 +03:00
# include "g_lock.h"
2011-05-05 13:25:29 +04:00
# include "util_tdb.h"
2012-02-15 19:38:43 +04:00
# include "../lib/util/tevent_ntstatus.h"
2011-03-24 17:31:06 +03:00
# include "messages.h"
2012-08-22 14:35:29 +04:00
# include "serverid.h"
2009-10-25 18:12:12 +03:00
struct g_lock_ctx {
struct db_context * db ;
struct messaging_context * msg ;
} ;
2017-06-28 14:36:53 +03:00
/*
 * In-memory view of one parsed g_lock record: at most one exclusive
 * holder, an array of shared holders (marshalled server_ids), a
 * sequence number for the attached user data, and the data blob.
 *
 * NOTE: "shared" and "data" point into the record buffer handed to
 * g_lock_parse(); they are not separately allocated.
 */
struct g_lock {
	struct server_id exclusive;	/* .pid == 0 means "no exclusive holder" */
	size_t num_shared;
	uint8_t *shared;		/* num_shared * SERVER_ID_BUF_LENGTH bytes */
	uint64_t data_seqnum;		/* incremented by g_lock_write_data_fn() */
	size_t datalen;
	uint8_t *data;
};
/*
 * Parse a g_lock record buffer into *lck.
 *
 * Wire format: SERVER_ID_BUF_LENGTH bytes exclusive holder,
 * 8 bytes data_seqnum, 4 bytes num_shared, then num_shared
 * marshalled server_ids, then the user data.
 *
 * A buffer too short for the fixed header (including an empty, i.e.
 * fresh, record) parses as "no holders, no data" with a randomized
 * data_seqnum so that data watchers see a seqnum change.
 *
 * Returns false only if num_shared is inconsistent with the buffer
 * size. On success lck->shared and lck->data alias "buf".
 */
static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
{
	struct server_id exclusive;
	size_t num_shared, shared_len;
	uint64_t data_seqnum;

	if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
		      sizeof(uint64_t) +     /* seqnum */
		      sizeof(uint32_t))) {   /* num_shared */
		struct g_lock ret = { .exclusive.pid = 0 };
		/* Fresh/short record: randomize the seqnum, see above */
		generate_random_buffer(
			(uint8_t *)&ret.data_seqnum,
			sizeof(ret.data_seqnum));
		*lck = ret;
		return true;
	}

	server_id_get(&exclusive, buf);
	buf += SERVER_ID_BUF_LENGTH;
	buflen -= SERVER_ID_BUF_LENGTH;

	data_seqnum = BVAL(buf, 0);
	buf += sizeof(uint64_t);
	buflen -= sizeof(uint64_t);

	num_shared = IVAL(buf, 0);
	buf += sizeof(uint32_t);
	buflen -= sizeof(uint32_t);

	/* Overflow-safe check that the shared array fits the buffer */
	if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
		DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
			  num_shared,
			  buflen);
		return false;
	}

	shared_len = num_shared * SERVER_ID_BUF_LENGTH;

	*lck = (struct g_lock) {
		.exclusive = exclusive,
		.num_shared = num_shared,
		.shared = buf,
		.data_seqnum = data_seqnum,
		.datalen = buflen-shared_len, /* rest of record is user data */
		.data = buf+shared_len,
	};

	return true;
}
2019-11-19 19:29:18 +03:00
static void g_lock_get_shared ( const struct g_lock * lck ,
size_t i ,
struct server_id * shared )
2017-06-28 14:36:53 +03:00
{
2019-11-19 19:29:18 +03:00
if ( i > = lck - > num_shared ) {
2017-06-28 14:36:53 +03:00
abort ( ) ;
}
2019-11-19 19:29:18 +03:00
server_id_get ( shared , lck - > shared + i * SERVER_ID_BUF_LENGTH ) ;
2017-06-28 14:36:53 +03:00
}
2019-11-19 19:29:18 +03:00
static void g_lock_del_shared ( struct g_lock * lck , size_t i )
2017-06-28 14:36:53 +03:00
{
2019-11-19 19:29:18 +03:00
if ( i > = lck - > num_shared ) {
2017-06-28 14:36:53 +03:00
abort ( ) ;
}
2019-11-19 19:29:18 +03:00
lck - > num_shared - = 1 ;
if ( i < lck - > num_shared ) {
memcpy ( lck - > shared + i * SERVER_ID_BUF_LENGTH ,
lck - > shared + lck - > num_shared * SERVER_ID_BUF_LENGTH ,
SERVER_ID_BUF_LENGTH ) ;
2017-06-28 14:36:53 +03:00
}
}
2019-11-19 19:29:18 +03:00
/*
 * Serialize *lck back into the database record "rec" using a
 * vectored store (no marshalling buffer allocation).
 *
 * If new_shared is non-NULL, that server_id is appended as an
 * additional shared holder; lck->num_shared is incremented as a side
 * effect. new_dbufs/num_new_dbufs are appended verbatim after the
 * marshalled lock data.
 */
static NTSTATUS g_lock_store(
	struct db_record *rec,
	struct g_lock *lck,
	struct server_id *new_shared,
	const TDB_DATA *new_dbufs,
	size_t num_new_dbufs)
{
	uint8_t exclusive[SERVER_ID_BUF_LENGTH];
	uint8_t seqnum_buf[sizeof(uint64_t)];
	uint8_t sizebuf[sizeof(uint32_t)];
	uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];

	/* VLA: 6 fixed chunks plus the caller-supplied extra chunks */
	struct TDB_DATA dbufs[6 + num_new_dbufs];

	dbufs[0] = (TDB_DATA) {
		.dptr = exclusive, .dsize = sizeof(exclusive),
	};
	dbufs[1] = (TDB_DATA) {
		.dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
	};
	dbufs[2] = (TDB_DATA) {
		.dptr = sizebuf, .dsize = sizeof(sizebuf),
	};
	dbufs[3] = (TDB_DATA) {
		.dptr = lck->shared,
		.dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
	};
	/* Slot 4 is the optional new shared entry, empty by default */
	dbufs[4] = (TDB_DATA) { 0 };
	dbufs[5] = (TDB_DATA) {
		.dptr = lck->data, .dsize = lck->datalen,
	};

	if (num_new_dbufs != 0) {
		memcpy(&dbufs[6],
		       new_dbufs,
		       num_new_dbufs * sizeof(TDB_DATA));
	}

	server_id_put(exclusive, lck->exclusive);
	SBVAL(seqnum_buf, 0, lck->data_seqnum);

	if (new_shared != NULL) {
		/* num_shared is stored as a 32-bit count on disk */
		if (lck->num_shared >= UINT32_MAX) {
			return NT_STATUS_BUFFER_OVERFLOW;
		}

		server_id_put(new_shared_buf, *new_shared);

		dbufs[4] = (TDB_DATA) {
			.dptr = new_shared_buf,
			.dsize = sizeof(new_shared_buf),
		};

		lck->num_shared += 1;
	}

	/* Must happen after the potential num_shared increment above */
	SIVAL(sizebuf, 0, lck->num_shared);

	return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
}
2019-11-05 18:36:44 +03:00
/*
 * Create a g_lock context on top of a caller-provided backend
 * database. The backend is wrapped with dbwrap_watch so that lock
 * waiters can be woken up; *backend is consumed via db_open_watched().
 * Returns NULL on allocation or open failure.
 */
struct g_lock_ctx *g_lock_ctx_init_backend(
	TALLOC_CTX *mem_ctx,
	struct messaging_context *msg,
	struct db_context **backend)
{
	struct g_lock_ctx *ctx = NULL;

	ctx = talloc(mem_ctx, struct g_lock_ctx);
	if (ctx == NULL) {
		return NULL;
	}
	ctx->msg = msg;

	ctx->db = db_open_watched(ctx, backend, msg);
	if (ctx->db == NULL) {
		DBG_WARNING("db_open_watched failed\n");
		TALLOC_FREE(ctx);
		return NULL;
	}

	return ctx;
}
/*
 * Convenience initializer: open the default g_lock.tdb in the lock
 * directory and build a g_lock context on top of it. Returns NULL on
 * failure.
 */
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	struct db_context *backend = NULL;
	char *db_path = lock_path(mem_ctx, "g_lock.tdb");

	if (db_path == NULL) {
		return NULL;
	}

	backend = db_open(mem_ctx,
			  db_path,
			  0,
			  TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
			  O_RDWR|O_CREAT,
			  0600,
			  DBWRAP_LOCK_ORDER_3,
			  DBWRAP_FLAG_NONE);
	TALLOC_FREE(db_path);

	if (backend == NULL) {
		DBG_WARNING("Could not open g_lock.tdb\n");
		return NULL;
	}

	return g_lock_ctx_init_backend(mem_ctx, msg, &backend);
}
2019-11-19 19:29:18 +03:00
static NTSTATUS g_lock_cleanup_dead (
struct db_record * rec ,
struct g_lock * lck ,
struct server_id * dead_blocker )
2009-10-25 18:12:12 +03:00
{
2019-11-19 19:29:18 +03:00
bool modified = false ;
bool exclusive_died ;
NTSTATUS status = NT_STATUS_OK ;
struct server_id_buf tmp ;
if ( dead_blocker = = NULL ) {
return NT_STATUS_OK ;
2009-10-25 18:12:12 +03:00
}
2019-11-19 19:29:18 +03:00
exclusive_died = server_id_equal ( dead_blocker , & lck - > exclusive ) ;
if ( exclusive_died ) {
DBG_DEBUG ( " Exclusive holder %s died \n " ,
server_id_str_buf ( lck - > exclusive , & tmp ) ) ;
lck - > exclusive . pid = 0 ;
modified = true ;
}
if ( lck - > num_shared ! = 0 ) {
bool shared_died ;
struct server_id shared ;
g_lock_get_shared ( lck , 0 , & shared ) ;
shared_died = server_id_equal ( dead_blocker , & shared ) ;
if ( shared_died ) {
DBG_DEBUG ( " Shared holder %s died \n " ,
server_id_str_buf ( shared , & tmp ) ) ;
g_lock_del_shared ( lck , 0 ) ;
modified = true ;
}
}
if ( modified ) {
2020-04-29 16:28:03 +03:00
status = g_lock_store ( rec , lck , NULL , NULL , 0 ) ;
2019-11-19 19:29:18 +03:00
if ( ! NT_STATUS_IS_OK ( status ) ) {
DBG_DEBUG ( " g_lock_store() failed: %s \n " ,
nt_errstr ( status ) ) ;
}
}
return status ;
}
static ssize_t g_lock_find_shared (
struct g_lock * lck ,
const struct server_id * self )
{
size_t i ;
for ( i = 0 ; i < lck - > num_shared ; i + + ) {
struct server_id shared ;
bool same ;
g_lock_get_shared ( lck , i , & shared ) ;
same = server_id_equal ( self , & shared ) ;
if ( same ) {
return i ;
}
}
return - 1 ;
2009-10-25 18:12:12 +03:00
}
2019-11-22 13:55:52 +03:00
static void g_lock_cleanup_shared ( struct g_lock * lck )
{
size_t i ;
struct server_id check ;
bool exists ;
if ( lck - > num_shared = = 0 ) {
return ;
}
/*
* Read locks can stay around forever if the process dies . Do
* a heuristic check for process existence : Check one random
* process for existence . Hopefully this will keep runaway
* read locks under control .
*/
i = generate_random ( ) % lck - > num_shared ;
g_lock_get_shared ( lck , i , & check ) ;
exists = serverid_exists ( & check ) ;
if ( ! exists ) {
struct server_id_buf tmp ;
DBG_DEBUG ( " Shared locker %s died -- removing \n " ,
server_id_str_buf ( check , & tmp ) ) ;
g_lock_del_shared ( lck , i ) ;
}
}
2019-11-19 19:29:18 +03:00
/* State of one asynchronous g_lock_lock request. */
struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	TDB_DATA key;		/* record being locked */
	enum g_lock_type type;
	bool retry;		/* set once we re-enter after a watch fired */
};

/*
 * Per-invocation state handed to g_lock_lock_fn() through
 * dbwrap_do_locked().
 */
struct g_lock_lock_fn_state {
	struct g_lock_lock_state *req_state;
	struct server_id *dead_blocker;	/* blocker reported dead, or NULL */
	struct tevent_req *watch_req;	/* set when we have to keep waiting */
	NTSTATUS status;		/* result of the trylock attempt */
};

static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
/*
 * Core lock attempt, called under the record lock of dbwrap_do_locked().
 *
 * Returns:
 *   NT_STATUS_OK               - lock granted (record updated)
 *   NT_STATUS_LOCK_NOT_GRANTED - must wait; *blocker is the holder to
 *                                watch
 *   other errors               - parse/store failures and the
 *                                upgrade/downgrade misuse cases below
 */
static NTSTATUS g_lock_trylock(
	struct db_record *rec,
	struct g_lock_lock_fn_state *state,
	TDB_DATA data,
	struct server_id *blocker)
{
	struct g_lock_lock_state *req_state = state->req_state;
	struct server_id self = messaging_server_id(req_state->ctx->msg);
	enum g_lock_type type = req_state->type;
	bool retry = req_state->retry;
	struct g_lock lck = { .exclusive.pid = 0 };
	struct server_id_buf tmp;
	NTSTATUS status;
	bool ok;

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed\n");
		return NT_STATUS_INTERNAL_DB_CORRUPTION;
	}

	/* Drop the holder the watch machinery reported as dead, if any */
	status = g_lock_cleanup_dead(rec, &lck, state->dead_blocker);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_cleanup_dead() failed: %s\n",
			  nt_errstr(status));
		return status;
	}

	if (lck.exclusive.pid != 0) {
		bool self_exclusive = server_id_equal(&self, &lck.exclusive);

		if (!self_exclusive) {
			bool exists = serverid_exists(&lck.exclusive);
			if (!exists) {
				/* Stale exclusive holder, pretend it's gone */
				lck.exclusive = (struct server_id) { .pid = 0 };
				goto noexclusive;
			}

			DBG_DEBUG("%s has an exclusive lock\n",
				  server_id_str_buf(lck.exclusive, &tmp));

			if (type == G_LOCK_DOWNGRADE) {
				struct server_id_buf tmp2;
				/* Can't downgrade a lock we don't hold */
				DBG_DEBUG("%s: Trying to downgrade %s\n",
					  server_id_str_buf(self, &tmp),
					  server_id_str_buf(
						  lck.exclusive, &tmp2));
				return NT_STATUS_NOT_LOCKED;
			}

			if (type == G_LOCK_UPGRADE) {
				ssize_t shared_idx;
				shared_idx = g_lock_find_shared(&lck, &self);

				if (shared_idx == -1) {
					DBG_DEBUG("Trying to upgrade %s "
						  "without "
						  "existing shared lock\n",
						  server_id_str_buf(
							  self, &tmp));
					return NT_STATUS_NOT_LOCKED;
				}

				/*
				 * We're trying to upgrade, and the
				 * exlusive lock is taken by someone
				 * else. This means that someone else
				 * is waiting for us to give up our
				 * shared lock. If we now also wait
				 * for someone to give their shared
				 * lock, we will deadlock.
				 */
				DBG_DEBUG("Trying to upgrade %s while "
					  "someone else is also "
					  "trying to upgrade\n",
					  server_id_str_buf(self, &tmp));
				return NT_STATUS_POSSIBLE_DEADLOCK;
			}

			*blocker = lck.exclusive;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		/* From here on WE are the exclusive holder */

		if (type == G_LOCK_DOWNGRADE) {
			DBG_DEBUG("Downgrading %s from WRITE to READ\n",
				  server_id_str_buf(self, &tmp));

			lck.exclusive = (struct server_id) { .pid = 0 };
			goto do_shared;
		}

		if (!retry) {
			DBG_DEBUG("%s already locked by self\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_WAS_LOCKED;
		}

		if (lck.num_shared != 0) {
			/* Exclusive intent noted; wait for readers to leave */
			g_lock_get_shared(&lck, 0, blocker);

			DBG_DEBUG("Continue waiting for shared lock %s\n",
				  server_id_str_buf(*blocker, &tmp));
			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		/* Granted: no more need for the unlock-on-abort destructor */
		talloc_set_destructor(req_state, NULL);

		/*
		 * Retry after a conflicting lock was released
		 */
		return NT_STATUS_OK;
	}

noexclusive:

	if (type == G_LOCK_UPGRADE) {
		ssize_t shared_idx = g_lock_find_shared(&lck, &self);

		if (shared_idx == -1) {
			DBG_DEBUG("Trying to upgrade %s without "
				  "existing shared lock\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_NOT_LOCKED;
		}

		/* Give up our shared slot, then take the WRITE path */
		g_lock_del_shared(&lck, shared_idx);
		type = G_LOCK_WRITE;
	}

	if (type == G_LOCK_WRITE) {
		ssize_t shared_idx = g_lock_find_shared(&lck, &self);

		if (shared_idx != -1) {
			DBG_DEBUG("Trying to writelock existing shared %s\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_WAS_LOCKED;
		}

		/* Mark exclusive intent even while readers remain */
		lck.exclusive = self;

		status = g_lock_store(rec, &lck, NULL, NULL, 0);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
			return status;
		}

		if (lck.num_shared != 0) {
			/*
			 * Our exclusive marker is stored; make sure it
			 * is removed again if this request dies while
			 * waiting for the readers to drain.
			 */
			talloc_set_destructor(
				req_state, g_lock_lock_state_destructor);

			g_lock_get_shared(&lck, 0, blocker);

			DBG_DEBUG("Waiting for %zu shared locks, "
				  "picking blocker %s\n",
				  lck.num_shared,
				  server_id_str_buf(*blocker, &tmp));

			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		talloc_set_destructor(req_state, NULL);
		return NT_STATUS_OK;
	}

do_shared:

	if (lck.num_shared == 0) {
		status = g_lock_store(rec, &lck, &self, NULL, 0);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
		}

		return status;
	}

	/* Drop one potentially-stale reader before adding ourselves */
	g_lock_cleanup_shared(&lck);

	status = g_lock_store(rec, &lck, &self, NULL, 0);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_store() failed: %s\n",
			  nt_errstr(status));
		return status;
	}

	return NT_STATUS_OK;
}
2017-06-28 21:01:34 +03:00
2019-10-23 12:34:47 +03:00
/*
 * dbwrap_do_locked() callback: attempt the lock and, if not granted,
 * start watching the record for the blocker to go away.
 */
static void g_lock_lock_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_lock_fn_state *state = private_data;
	struct server_id blocker = { 0 };
	NTSTATUS status;

	status = g_lock_trylock(rec, state, value, &blocker);
	state->status = status;
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		return;
	}

	state->watch_req = dbwrap_watched_watch_send(
		state->req_state, state->req_state->ev, rec, blocker);
	if (state->watch_req == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
	}
}
static int g_lock_lock_state_destructor ( struct g_lock_lock_state * s )
{
NTSTATUS status = g_lock_unlock ( s - > ctx , s - > key ) ;
if ( ! NT_STATUS_IS_OK ( status ) ) {
DBG_DEBUG ( " g_lock_unlock failed: %s \n " , nt_errstr ( status ) ) ;
}
return 0 ;
2017-06-28 21:01:34 +03:00
}
2019-11-19 19:29:18 +03:00
static void g_lock_lock_retry ( struct tevent_req * subreq ) ;
2012-02-15 19:38:43 +04:00
/*
 * Async g_lock acquisition. Tries immediately under the record lock;
 * on contention arms a watch on the record plus a 5-10s random
 * re-check timeout and completes later via g_lock_lock_retry().
 */
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    TDB_DATA key,
				    enum g_lock_type type)
{
	struct tevent_req *req;
	struct g_lock_lock_state *state;
	struct g_lock_lock_fn_state fn_state;
	NTSTATUS status;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->key = key;
	state->type = type;

	fn_state = (struct g_lock_lock_fn_state) {
		.req_state = state,
	};

	status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return tevent_req_post(req, ev);
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		/* Granted on first try */
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return tevent_req_post(req, ev);
	}

	/* LOCK_NOT_GRANTED: g_lock_lock_fn must have set up a watch */
	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Randomized timeout so that all waiters do not re-check the
	 * record at the same moment.
	 */
	ok = tevent_req_set_endtime(
		fn_state.watch_req,
		state->ev,
		timeval_current_ofs(5 + generate_random() % 5, 0));
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);

	return req;
}
2012-02-15 19:38:43 +04:00
/*
 * Watch/timeout completion: the record changed, the blocker died, or
 * the random re-check timeout fired. Retry the lock attempt and, on
 * continued contention, re-arm the watch.
 */
static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct g_lock_lock_fn_state fn_state;
	struct server_id blocker;
	bool blockerdead;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, &blockerdead, &blocker);
	DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
	TALLOC_FREE(subreq);

	/* IO_TIMEOUT is just our periodic re-check, not an error */
	if (!NT_STATUS_IS_OK(status) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->retry = true;

	fn_state = (struct g_lock_lock_fn_state) {
		.req_state = state,
		/* Tell the trylock to clean out the dead holder, if any */
		.dead_blocker = blockerdead ? &blocker : NULL,
	};

	status = dbwrap_do_locked(state->ctx->db, state->key,
				  g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return;
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return;
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return;
	}

	/* Re-arm the randomized re-check timeout, see g_lock_lock_send() */
	if (!tevent_req_set_endtime(
		    fn_state.watch_req, state->ev,
		    timeval_current_ofs(5 + generate_random() % 5, 0))) {
		return;
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
}
2012-02-15 19:38:43 +04:00
/*
 * Receive the result of g_lock_lock_send(). NT_STATUS_OK means the
 * lock was granted.
 */
NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	NTSTATUS status = tevent_req_simple_recv_ntstatus(req);

	return status;
}
2009-10-25 18:12:12 +03:00
2019-11-22 14:02:22 +03:00
/* State for the uncontended fast path in g_lock_lock(). */
struct g_lock_lock_simple_state {
	struct server_id me;	/* our server id */
	enum g_lock_type type;	/* only G_LOCK_READ / G_LOCK_WRITE */
	NTSTATUS status;	/* fast-path result */
};
/*
 * Fast-path lock attempt for the uncontended case: grant a READ or
 * WRITE lock immediately if nothing conflicts, otherwise report
 * NT_STATUS_LOCK_NOT_GRANTED so the caller falls back to the full
 * async machinery.
 */
static void g_lock_lock_simple_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_lock_simple_state *state = private_data;
	struct g_lock lck = { .exclusive.pid = 0 };

	if (!g_lock_parse(value.dptr, value.dsize, &lck)) {
		DBG_DEBUG("g_lock_parse failed\n");
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	if (lck.exclusive.pid != 0) {
		/* Someone holds (or waits with) an exclusive marker */
		state->status = NT_STATUS_LOCK_NOT_GRANTED;
		return;
	}

	switch (state->type) {
	case G_LOCK_WRITE:
		if (lck.num_shared != 0) {
			state->status = NT_STATUS_LOCK_NOT_GRANTED;
			return;
		}
		lck.exclusive = state->me;
		state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
		return;
	case G_LOCK_READ:
		g_lock_cleanup_shared(&lck);
		state->status = g_lock_store(rec, &lck, &state->me, NULL, 0);
		return;
	default:
		/* Only READ/WRITE reach the fast path, see g_lock_lock() */
		state->status = NT_STATUS_LOCK_NOT_GRANTED;
		return;
	}
}
2017-12-03 22:47:02 +03:00
/*
 * Synchronous lock acquisition with timeout. READ/WRITE requests
 * first try a cheap uncontended fast path; on contention (and for
 * all other lock types) a nested event context drives the full async
 * implementation.
 */
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame;
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status;

	if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
		/*
		 * This is an abstraction violation: Normally we do
		 * the sync wrappers around async functions with full
		 * nested event contexts. However, this is used in
		 * very hot code paths, so avoid the event context
		 * creation for the good path where there's no lock
		 * contention. My benchmark gave a factor of 2
		 * improvement for lock/unlock.
		 */
		struct g_lock_lock_simple_state state = {
			.me = messaging_server_id(ctx->msg),
			.type = type,
		};
		status = dbwrap_do_locked(
			ctx->db, key, g_lock_lock_simple_fn, &state);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
				  nt_errstr(status));
			return status;
		}
		if (NT_STATUS_IS_OK(state.status)) {
			return NT_STATUS_OK;
		}
		if (!NT_STATUS_EQUAL(
			    state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
			return state.status;
		}

		/*
		 * Fall back to the full g_lock_trylock logic,
		 * g_lock_lock_simple_fn() called above only covers
		 * the uncontended path.
		 */
	}

	frame = talloc_stackframe();
	status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, key, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}
2017-06-28 16:39:49 +03:00
/* State for g_lock_unlock_fn(). */
struct g_lock_unlock_state {
	struct server_id self;	/* whose lock to remove */
	NTSTATUS status;	/* result of the unlock attempt */
};
2009-10-25 18:12:12 +03:00
2019-10-23 12:34:47 +03:00
/*
 * dbwrap_do_locked() callback: remove state->self's lock -- shared or
 * exclusive -- from the record. Deletes the record entirely when no
 * holders and no user data remain. Holding both kinds at once is
 * treated as database corruption.
 */
static void g_lock_unlock_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_unlock_state *state = private_data;
	struct server_id_buf tmp;
	struct g_lock lck;
	size_t i;
	bool ok, exclusive;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse() failed\n");
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	exclusive = server_id_equal(&state->self, &lck.exclusive);

	/* Look for ourselves among the shared holders */
	for (i=0; i<lck.num_shared; i++) {
		struct server_id shared;
		g_lock_get_shared(&lck, i, &shared);
		if (server_id_equal(&state->self, &shared)) {
			break;
		}
	}

	if (i < lck.num_shared) {
		if (exclusive) {
			DBG_DEBUG("%s both exclusive and shared (%zu)\n",
				  server_id_str_buf(state->self, &tmp),
				  i);
			state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
			return;
		}
		g_lock_del_shared(&lck, i);
	} else {
		if (!exclusive) {
			DBG_DEBUG("Lock %s not found, num_rec=%zu\n",
				  server_id_str_buf(state->self, &tmp),
				  lck.num_shared);
			state->status = NT_STATUS_NOT_FOUND;
			return;
		}
		lck.exclusive = (struct server_id) { .pid = 0 };
	}

	/* Nothing left at all: remove the record instead of storing it */
	if ((lck.exclusive.pid == 0) &&
	    (lck.num_shared == 0) &&
	    (lck.datalen == 0)) {
		state->status = dbwrap_record_delete(rec);
		return;
	}

	state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
}
2017-12-03 22:47:02 +03:00
/*
 * Release the lock (shared or exclusive) that we hold on "key".
 */
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
{
	NTSTATUS status;
	struct g_lock_unlock_state state = {
		.self = messaging_server_id(ctx->msg),
	};

	status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}

	status = state.status;
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("g_lock_unlock_fn failed: %s\n",
			    nt_errstr(status));
	}
	return status;
}
2017-06-30 20:42:50 +03:00
/* State for g_lock_write_data_fn(). */
struct g_lock_write_data_state {
	TDB_DATA key;		/* record key, used for error messages */
	struct server_id self;	/* must hold the exclusive lock */
	const uint8_t *data;	/* new user data to attach */
	size_t datalen;
	NTSTATUS status;	/* result of the write attempt */
};
2017-05-23 13:32:24 +03:00
2019-10-23 12:34:47 +03:00
/*
 * dbwrap_do_locked() callback: replace the user data attached to the
 * lock record and bump data_seqnum. Requires that state->self really
 * holds the exclusive lock (not merely waiting on readers).
 */
static void g_lock_write_data_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_write_data_state *state = private_data;
	struct g_lock lck;
	bool exclusive;
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse for %s failed\n",
			  hex_encode_talloc(talloc_tos(),
					    state->key.dptr,
					    state->key.dsize));
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	exclusive = server_id_equal(&state->self, &lck.exclusive);

	/*
	 * Make sure we're really exclusive. We are marked as
	 * exclusive when we are waiting for an exclusive lock
	 */
	exclusive &= (lck.num_shared == 0);

	if (!exclusive) {
		DBG_DEBUG("Not locked by us\n");
		state->status = NT_STATUS_NOT_LOCKED;
		return;
	}

	/* New data: bump the seqnum so readers can detect the change */
	lck.data_seqnum += 1;
	lck.data = discard_const_p(uint8_t, state->data);
	lck.datalen = state->datalen;
	state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
}
2017-05-23 13:32:24 +03:00
2017-12-03 22:47:02 +03:00
/*
 * Attach user data to the lock record at "key". The caller must hold
 * the exclusive lock on that key.
 */
NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
			   const uint8_t *buf, size_t buflen)
{
	NTSTATUS status;
	struct g_lock_write_data_state state = {
		.key = key,
		.self = messaging_server_id(ctx->msg),
		.data = buf,
		.datalen = buflen,
	};

	status = dbwrap_do_locked(
		ctx->db, key, g_lock_write_data_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}

	status = state.status;
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("g_lock_write_data_fn failed: %s\n",
			    nt_errstr(status));
	}
	return status;
}
2009-10-25 18:12:12 +03:00
/*
 * Adapter state for g_lock_locks(): bridges the dbwrap traverse
 * callback (which receives a struct db_record) to the user's
 * key-based callback.
 */
struct g_lock_locks_state {
	int (*fn)(TDB_DATA key, void *private_data);
	void *private_data;
};
static int g_lock_locks_fn ( struct db_record * rec , void * priv )
{
2011-08-17 13:21:31 +04:00
TDB_DATA key ;
2009-10-25 18:12:12 +03:00
struct g_lock_locks_state * state = ( struct g_lock_locks_state * ) priv ;
2011-08-17 13:21:31 +04:00
key = dbwrap_record_get_key ( rec ) ;
2017-12-03 22:47:02 +03:00
return state - > fn ( key , state - > private_data ) ;
2009-10-25 18:12:12 +03:00
}
int g_lock_locks ( struct g_lock_ctx * ctx ,
2017-12-03 22:47:02 +03:00
int ( * fn ) ( TDB_DATA key , void * private_data ) ,
2009-10-25 18:12:12 +03:00
void * private_data )
{
struct g_lock_locks_state state ;
2011-08-17 13:21:31 +04:00
NTSTATUS status ;
int count ;
2009-10-25 18:12:12 +03:00
state . fn = fn ;
state . private_data = private_data ;
2011-08-17 13:21:31 +04:00
status = dbwrap_traverse_read ( ctx - > db , g_lock_locks_fn , & state , & count ) ;
if ( ! NT_STATUS_IS_OK ( status ) ) {
return - 1 ;
}
2017-05-17 17:43:01 +03:00
return count ;
2009-10-25 18:12:12 +03:00
}
2017-06-30 23:09:12 +03:00
/*
 * State for g_lock_dump(): carries the user callback and its private
 * data into the dbwrap_parse_record() callback, and the result status
 * back out.
 */
struct g_lock_dump_state {
	TALLOC_CTX *mem_ctx;	/* allocation context for the shared-id array */
	TDB_DATA key;		/* record key (used for diagnostics) */
	void (*fn)(struct server_id exclusive,
		   size_t num_shared,
		   struct server_id *shared,
		   const uint8_t *data,
		   size_t datalen,
		   void *private_data);
	void *private_data;
	NTSTATUS status;	/* result reported back to g_lock_dump() */
};
/*
 * Parse-record callback for g_lock_dump(): decode the record,
 * materialize the shared lock holders into a temporary array and
 * present everything to the user callback.
 */
static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct g_lock_dump_state *state = private_data;
	struct g_lock lck = { .exclusive.pid = 0 };
	struct server_id *ids = NULL;
	size_t i;

	if (!g_lock_parse(data.dptr, data.dsize, &lck)) {
		DBG_DEBUG("g_lock_parse failed for %s\n",
			  hex_encode_talloc(talloc_tos(),
					    state->key.dptr,
					    state->key.dsize));
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	ids = talloc_array(state->mem_ctx, struct server_id, lck.num_shared);
	if (ids == NULL) {
		DBG_DEBUG("talloc failed\n");
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}

	/* Unpack every shared holder from the on-disk format */
	for (i = 0; i < lck.num_shared; i++) {
		g_lock_get_shared(&lck, i, &ids[i]);
	}

	state->fn(lck.exclusive,
		  lck.num_shared,
		  ids,
		  lck.data,
		  lck.datalen,
		  state->private_data);

	TALLOC_FREE(ids);

	state->status = NT_STATUS_OK;
}
2017-12-03 22:47:02 +03:00
/*
 * Read-only dump of a single g_lock record: "fn" receives the
 * exclusive holder, the shared holders and the record payload.
 */
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
		     void (*fn)(struct server_id exclusive,
				size_t num_shared,
				struct server_id *shared,
				const uint8_t *data,
				size_t datalen,
				void *private_data),
		     void *private_data)
{
	struct g_lock_dump_state state = {
		.mem_ctx = ctx,
		.key = key,
		.fn = fn,
		.private_data = private_data,
	};
	NTSTATUS nt;

	nt = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
	if (!NT_STATUS_IS_OK(nt)) {
		DBG_DEBUG("dbwrap_parse_record returned %s\n",
			  nt_errstr(nt));
		return nt;
	}

	/* Parsing/allocation problems are reported via state.status */
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_DEBUG("g_lock_dump_fn returned %s\n",
			  nt_errstr(state.status));
		return state.status;
	}

	return NT_STATUS_OK;
}
2019-10-30 18:12:11 +03:00
2019-11-05 18:36:59 +03:00
int g_lock_seqnum ( struct g_lock_ctx * ctx )
{
return dbwrap_get_seqnum ( ctx - > db ) ;
}
2019-10-30 18:12:11 +03:00
/*
 * State for the g_lock_watch_data tevent request: wait until the
 * payload of a g_lock record changes (its data_seqnum bumps) or the
 * blocker process exits.
 */
struct g_lock_watch_data_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	TDB_DATA key;			/* talloc copy owned by this state */
	struct server_id blocker;	/* process whose death also wakes us */
	bool blockerdead;		/* true if the blocker exited */
	uint64_t data_seqnum;		/* seqnum snapshot taken at watch start */
	NTSTATUS status;		/* NT_STATUS_EVENT_PENDING while armed */
};
static void g_lock_watch_data_done(struct tevent_req *subreq);

/*
 * Under the record lock: snapshot the current data_seqnum and arm a
 * dbwrap watch on the record. NT_STATUS_EVENT_PENDING in
 * state->status signals "watch armed, request stays pending".
 */
static void g_lock_watch_data_send_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	struct g_lock_watch_data_state *state = tevent_req_data(
		req, struct g_lock_watch_data_state);
	struct tevent_req *subreq = NULL;
	struct g_lock lck;

	if (!g_lock_parse(value.dptr, value.dsize, &lck)) {
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	state->data_seqnum = lck.data_seqnum;
	DBG_DEBUG("state->data_seqnum=%" PRIu64 "\n", state->data_seqnum);

	subreq = dbwrap_watched_watch_send(
		state, state->ev, rec, state->blocker);
	if (subreq == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}
	tevent_req_set_callback(subreq, g_lock_watch_data_done, req);

	state->status = NT_STATUS_EVENT_PENDING;
}
/*
 * Async request: wake when the payload of the g_lock record at "key"
 * changes, or when "blocker" dies. Completes immediately if the
 * record cannot be watched.
 */
struct tevent_req *g_lock_watch_data_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct g_lock_ctx *ctx,
	TDB_DATA key,
	struct server_id blocker)
{
	struct tevent_req *req = NULL;
	struct g_lock_watch_data_state *state = NULL;
	NTSTATUS nt;

	req = tevent_req_create(
		mem_ctx, &state, struct g_lock_watch_data_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->blocker = blocker;

	/* Keep our own copy: we re-lock by key on every wakeup */
	state->key = tdb_data_talloc_copy(state, key);
	if (tevent_req_nomem(state->key.dptr, req)) {
		return tevent_req_post(req, ev);
	}

	nt = dbwrap_do_locked(ctx->db, key, g_lock_watch_data_send_fn, req);
	if (tevent_req_nterror(req, nt)) {
		DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(nt));
		return tevent_req_post(req, ev);
	}

	if (!NT_STATUS_IS_OK(state->status)) {
		/* NT_STATUS_EVENT_PENDING: watch armed, stay pending */
		return req;
	}
	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
/*
 * Under the record lock after a wakeup: finish the request if the
 * data_seqnum moved past our snapshot, otherwise re-arm the watch
 * (the wakeup was for something other than a payload change).
 */
static void g_lock_watch_data_done_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	struct g_lock_watch_data_state *state = tevent_req_data(
		req, struct g_lock_watch_data_state);
	struct tevent_req *subreq = NULL;
	struct g_lock lck;

	if (!g_lock_parse(value.dptr, value.dsize, &lck)) {
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	if (lck.data_seqnum != state->data_seqnum) {
		/* Payload changed since we started watching: done */
		DBG_DEBUG("lck.data_seqnum=%" PRIu64 ", "
			  "state->data_seqnum=%" PRIu64 "\n",
			  lck.data_seqnum,
			  state->data_seqnum);
		state->status = NT_STATUS_OK;
		return;
	}

	/* Spurious wakeup: watch again */
	subreq = dbwrap_watched_watch_send(
		state, state->ev, rec, state->blocker);
	if (subreq == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}
	tevent_req_set_callback(subreq, g_lock_watch_data_done, req);

	state->status = NT_STATUS_EVENT_PENDING;
}
static void g_lock_watch_data_done ( struct tevent_req * subreq )
{
struct tevent_req * req = tevent_req_callback_data (
subreq , struct tevent_req ) ;
struct g_lock_watch_data_state * state = tevent_req_data (
req , struct g_lock_watch_data_state ) ;
NTSTATUS status ;
status = dbwrap_watched_watch_recv (
subreq , & state - > blockerdead , & state - > blocker ) ;
TALLOC_FREE ( subreq ) ;
if ( tevent_req_nterror ( req , status ) ) {
DBG_DEBUG ( " dbwrap_watched_watch_recv returned %s \n " ,
nt_errstr ( status ) ) ;
return ;
}
status = dbwrap_do_locked (
state - > ctx - > db , state - > key , g_lock_watch_data_done_fn , req ) ;
if ( tevent_req_nterror ( req , status ) ) {
DBG_DEBUG ( " dbwrap_do_locked returned %s \n " , nt_errstr ( status ) ) ;
return ;
}
if ( NT_STATUS_EQUAL ( state - > status , NT_STATUS_EVENT_PENDING ) ) {
return ;
}
if ( tevent_req_nterror ( req , state - > status ) ) {
return ;
}
tevent_req_done ( req ) ;
}
NTSTATUS g_lock_watch_data_recv (
struct tevent_req * req ,
bool * blockerdead ,
struct server_id * blocker )
{
struct g_lock_watch_data_state * state = tevent_req_data (
req , struct g_lock_watch_data_state ) ;
NTSTATUS status ;
if ( tevent_req_is_nterror ( req , & status ) ) {
return status ;
}
if ( blockerdead ! = NULL ) {
* blockerdead = state - > blockerdead ;
}
if ( blocker ! = NULL ) {
* blocker = state - > blocker ;
}
return NT_STATUS_OK ;
}
2019-11-04 18:03:52 +03:00
/*
 * Under the record lock: bump data_seqnum and store the record back
 * unchanged otherwise, so that all data watchers wake up.
 */
static void g_lock_wake_watchers_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock lck = { .exclusive.pid = 0 };
	NTSTATUS nt;

	if (!g_lock_parse(value.dptr, value.dsize, &lck)) {
		DBG_WARNING("g_lock_parse failed\n");
		return;
	}

	lck.data_seqnum += 1;

	nt = g_lock_store(rec, &lck, NULL, NULL, 0);
	if (!NT_STATUS_IS_OK(nt)) {
		DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(nt));
	}
}
/*
 * Force all g_lock_watch_data waiters on "key" to wake up, by bumping
 * the record's data_seqnum. Errors are logged, not reported.
 */
void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
{
	NTSTATUS nt = dbwrap_do_locked(
		ctx->db, key, g_lock_wake_watchers_fn, NULL);

	if (!NT_STATUS_IS_OK(nt)) {
		DBG_DEBUG("dbwrap_do_locked returned %s\n",
			  nt_errstr(nt));
	}
}