/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "lib/util/debug.h"
#include "lib/util/talloc_stack.h"
#include "lib/util/samba_util.h"
#include "lib/util_path.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
};
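
/*
 * Data layout of a g_lock record, as read by g_lock_parse() and
 * written by g_lock_store() below:
 *
 *	[exclusive holder]	SERVER_ID_BUF_LENGTH bytes
 *	[num_shared]		uint32_t (IVAL/SIVAL)
 *	[shared holders]	num_shared * SERVER_ID_BUF_LENGTH bytes
 *	[user data]		remaining bytes, opaque to g_lock
 *
 * An exclusive entry with pid == 0 means "no exclusive holder".
 */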

struct g_lock {
	struct server_id exclusive;
	size_t num_shared;
	uint8_t *shared;
	size_t datalen;
	uint8_t *data;
};

static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
{
	struct server_id exclusive;
	size_t num_shared, shared_len;

	if (buflen < (SERVER_ID_BUF_LENGTH + sizeof(uint32_t))) {
		*lck = (struct g_lock) { .exclusive.pid = 0 };
		return true;
	}

	server_id_get(&exclusive, buf);
	buf += SERVER_ID_BUF_LENGTH;
	buflen -= SERVER_ID_BUF_LENGTH;

	num_shared = IVAL(buf, 0);
	buf += sizeof(uint32_t);
	buflen -= sizeof(uint32_t);

	if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
		return false;
	}

	shared_len = num_shared * SERVER_ID_BUF_LENGTH;

	*lck = (struct g_lock) {
		.exclusive = exclusive,
		.num_shared = num_shared,
		.shared = buf,
		.datalen = buflen - shared_len,
		.data = buf + shared_len,
	};

	return true;
}

static void g_lock_get_shared(const struct g_lock *lck,
			      size_t i,
			      struct server_id *shared)
{
	if (i >= lck->num_shared) {
		abort();
	}
	server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
}

static void g_lock_del_shared(struct g_lock *lck, size_t i)
{
	if (i >= lck->num_shared) {
		abort();
	}
	lck->num_shared -= 1;
	if (i < lck->num_shared) {
		memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
		       lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
		       SERVER_ID_BUF_LENGTH);
	}
}

static NTSTATUS g_lock_store(
	struct db_record *rec,
	struct g_lock *lck,
	struct server_id *new_shared)
{
	uint8_t exclusive[SERVER_ID_BUF_LENGTH];
	uint8_t sizebuf[sizeof(uint32_t)];
	uint8_t shared[SERVER_ID_BUF_LENGTH];

	struct TDB_DATA dbufs[] = {
		{ .dptr = exclusive, .dsize = sizeof(exclusive) },
		{ .dptr = sizebuf, .dsize = sizeof(sizebuf) },
		{ .dptr = lck->shared,
		  .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH },
		{ 0 },
		{ .dptr = lck->data, .dsize = lck->datalen }
	};

	server_id_put(exclusive, lck->exclusive);

	if (new_shared != NULL) {
		if (lck->num_shared >= UINT32_MAX) {
			return NT_STATUS_BUFFER_OVERFLOW;
		}

		server_id_put(shared, *new_shared);

		dbufs[3] = (TDB_DATA) {
			.dptr = shared, .dsize = sizeof(shared),
		};

		lck->num_shared += 1;
	}

	SIVAL(sizebuf, 0, lck->num_shared);

	return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
}
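
/*
 * Note on g_lock_store(): the record is written as a vector of five
 * chunks via dbwrap_record_storev(). Slot 3 is normally empty ({ 0 })
 * and is only filled in when a new shared holder ("new_shared") is
 * appended, which also bumps num_shared before sizebuf is filled.
 */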

struct g_lock_ctx *g_lock_ctx_init_backend(
	TALLOC_CTX *mem_ctx,
	struct messaging_context *msg,
	struct db_context **backend)
{
	struct g_lock_ctx *result;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;

	result->db = db_open_watched(result, backend, msg);
	if (result->db == NULL) {
		DBG_WARNING("db_open_watched failed\n");
		TALLOC_FREE(result);
		return NULL;
	}

	return result;
}

struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	char *db_path = NULL;
	struct db_context *backend = NULL;
	struct g_lock_ctx *ctx = NULL;

	db_path = lock_path(mem_ctx, "g_lock.tdb");
	if (db_path == NULL) {
		return NULL;
	}

	backend = db_open(
		mem_ctx,
		db_path,
		0,
		TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
		O_RDWR|O_CREAT,
		0600,
		DBWRAP_LOCK_ORDER_3,
		DBWRAP_FLAG_NONE);
	TALLOC_FREE(db_path);
	if (backend == NULL) {
		DBG_WARNING("Could not open g_lock.tdb\n");
		return NULL;
	}

	ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
	return ctx;
}
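
/*
 * The backend database is wrapped with db_open_watched() so that lock
 * waiters can register on individual records via
 * dbwrap_watched_watch_send() (see g_lock_lock_fn() below) and be woken
 * up when a conflicting holder modifies or deletes the record.
 */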

static NTSTATUS g_lock_cleanup_dead(
	struct db_record *rec,
	struct g_lock *lck,
	struct server_id *dead_blocker)
{
	bool modified = false;
	bool exclusive_died;
	NTSTATUS status = NT_STATUS_OK;
	struct server_id_buf tmp;

	if (dead_blocker == NULL) {
		return NT_STATUS_OK;
	}

	exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);

	if (exclusive_died) {
		DBG_DEBUG("Exclusive holder %s died\n",
			  server_id_str_buf(lck->exclusive, &tmp));
		lck->exclusive.pid = 0;
		modified = true;
	}

	if (lck->num_shared != 0) {
		bool shared_died;
		struct server_id shared;

		g_lock_get_shared(lck, 0, &shared);
		shared_died = server_id_equal(dead_blocker, &shared);

		if (shared_died) {
			DBG_DEBUG("Shared holder %s died\n",
				  server_id_str_buf(shared, &tmp));
			g_lock_del_shared(lck, 0);
			modified = true;
		}
	}

	if (modified) {
		status = g_lock_store(rec, lck, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
		}
	}

	return status;
}

static ssize_t g_lock_find_shared(
	struct g_lock *lck,
	const struct server_id *self)
{
	size_t i;

	for (i=0; i<lck->num_shared; i++) {
		struct server_id shared;
		bool same;

		g_lock_get_shared(lck, i, &shared);

		same = server_id_equal(self, &shared);
		if (same) {
			return i;
		}
	}

	return -1;
}

static void g_lock_cleanup_shared(struct g_lock *lck)
{
	size_t i;
	struct server_id check;
	bool exists;

	if (lck->num_shared == 0) {
		return;
	}

	/*
	 * Read locks can stay around forever if the process dies. Do
	 * a heuristic check for process existence: Check one random
	 * process for existence. Hopefully this will keep runaway
	 * read locks under control.
	 */
	i = generate_random() % lck->num_shared;

	g_lock_get_shared(lck, i, &check);

	exists = serverid_exists(&check);
	if (!exists) {
		struct server_id_buf tmp;
		DBG_DEBUG("Shared locker %s died -- removing\n",
			  server_id_str_buf(check, &tmp));
		g_lock_del_shared(lck, i);
	}
}
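
/*
 * Note that g_lock_cleanup_shared() only ever checks one random shared
 * holder per call, so stale entries left behind by dead processes are
 * cleaned up incrementally over subsequent lock attempts rather than
 * all at once.
 */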

struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	TDB_DATA key;
	enum g_lock_type type;
	bool retry;
};

struct g_lock_lock_fn_state {
	struct g_lock_lock_state *req_state;
	struct server_id *dead_blocker;

	struct tevent_req *watch_req;
	NTSTATUS status;
};

static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
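
/*
 * Overview of the async locking flow: g_lock_lock_send() runs
 * g_lock_lock_fn() under dbwrap_do_locked(). g_lock_trylock() either
 * grants the lock (NT_STATUS_OK), fails permanently, or returns
 * NT_STATUS_LOCK_NOT_GRANTED together with a "blocker" server_id. In
 * the latter case a watch on the record is armed and
 * g_lock_lock_retry() re-runs the same logic once the blocker changed
 * the record, died, or the watch timed out.
 */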

static NTSTATUS g_lock_trylock(
	struct db_record *rec,
	struct g_lock_lock_fn_state *state,
	TDB_DATA data,
	struct server_id *blocker)
{
	struct g_lock_lock_state *req_state = state->req_state;
	struct server_id self = messaging_server_id(req_state->ctx->msg);
	enum g_lock_type type = req_state->type;
	bool retry = req_state->retry;
	struct g_lock lck = { .exclusive.pid = 0 };
	struct server_id_buf tmp;
	NTSTATUS status;
	bool ok;

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed\n");
		return NT_STATUS_INTERNAL_DB_CORRUPTION;
	}

	status = g_lock_cleanup_dead(rec, &lck, state->dead_blocker);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_cleanup_dead() failed: %s\n",
			  nt_errstr(status));
		return status;
	}

	if (lck.exclusive.pid != 0) {
		bool self_exclusive = server_id_equal(&self, &lck.exclusive);

		if (!self_exclusive) {
			bool exists = serverid_exists(&lck.exclusive);
			if (!exists) {
				lck.exclusive = (struct server_id) { .pid = 0 };
				goto noexclusive;
			}

			DBG_DEBUG("%s has an exclusive lock\n",
				  server_id_str_buf(lck.exclusive, &tmp));

			if (type == G_LOCK_DOWNGRADE) {
				struct server_id_buf tmp2;
				DBG_DEBUG("%s: Trying to downgrade %s\n",
					  server_id_str_buf(self, &tmp),
					  server_id_str_buf(
						  lck.exclusive, &tmp2));
				return NT_STATUS_NOT_LOCKED;
			}

			if (type == G_LOCK_UPGRADE) {
				ssize_t shared_idx;
				shared_idx = g_lock_find_shared(&lck, &self);

				if (shared_idx == -1) {
					DBG_DEBUG("Trying to upgrade %s "
						  "without "
						  "existing shared lock\n",
						  server_id_str_buf(
							  self, &tmp));
					return NT_STATUS_NOT_LOCKED;
				}

				/*
				 * We're trying to upgrade, and the
				 * exclusive lock is taken by someone
				 * else. This means that someone else
				 * is waiting for us to give up our
				 * shared lock. If we now also wait
				 * for someone to give their shared
				 * lock, we will deadlock.
				 */

				DBG_DEBUG("Trying to upgrade %s while "
					  "someone else is also "
					  "trying to upgrade\n",
					  server_id_str_buf(self, &tmp));
				return NT_STATUS_POSSIBLE_DEADLOCK;
			}

			*blocker = lck.exclusive;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		if (type == G_LOCK_DOWNGRADE) {
			DBG_DEBUG("Downgrading %s from WRITE to READ\n",
				  server_id_str_buf(self, &tmp));

			lck.exclusive = (struct server_id) { .pid = 0 };
			goto do_shared;
		}

		if (!retry) {
			DBG_DEBUG("%s already locked by self\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_WAS_LOCKED;
		}

		if (lck.num_shared != 0) {
			g_lock_get_shared(&lck, 0, blocker);

			DBG_DEBUG("Continue waiting for shared lock %s\n",
				  server_id_str_buf(*blocker, &tmp));
			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		talloc_set_destructor(req_state, NULL);

		/*
		 * Retry after a conflicting lock was released
		 */
		return NT_STATUS_OK;
	}

noexclusive:

	if (type == G_LOCK_UPGRADE) {
		ssize_t shared_idx = g_lock_find_shared(&lck, &self);

		if (shared_idx == -1) {
			DBG_DEBUG("Trying to upgrade %s without "
				  "existing shared lock\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_NOT_LOCKED;
		}

		g_lock_del_shared(&lck, shared_idx);
		type = G_LOCK_WRITE;
	}

	if (type == G_LOCK_WRITE) {
		ssize_t shared_idx = g_lock_find_shared(&lck, &self);

		if (shared_idx != -1) {
			DBG_DEBUG("Trying to writelock existing shared %s\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_WAS_LOCKED;
		}

		lck.exclusive = self;

		status = g_lock_store(rec, &lck, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
			return status;
		}

		if (lck.num_shared != 0) {
			talloc_set_destructor(
				req_state, g_lock_lock_state_destructor);

			g_lock_get_shared(&lck, 0, blocker);

			DBG_DEBUG("Waiting for %zu shared locks, "
				  "picking blocker %s\n",
				  lck.num_shared,
				  server_id_str_buf(*blocker, &tmp));

			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		talloc_set_destructor(req_state, NULL);

		return NT_STATUS_OK;
	}

do_shared:

	if (lck.num_shared == 0) {
		status = g_lock_store(rec, &lck, &self);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
		}
		return status;
	}

	g_lock_cleanup_shared(&lck);

	status = g_lock_store(rec, &lck, &self);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_store() failed: %s\n",
			  nt_errstr(status));
		return status;
	}

	return NT_STATUS_OK;
}
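
/*
 * While an exclusive lock request is waiting for the remaining shared
 * holders to drain, the requester is already stored as the exclusive
 * holder. g_lock_lock_state_destructor() (armed in g_lock_trylock()
 * above) removes that provisional exclusive entry again via
 * g_lock_unlock() if the pending request is freed before being granted.
 */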

static void g_lock_lock_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_lock_fn_state *state = private_data;
	struct server_id blocker = {0};

	state->status = g_lock_trylock(rec, state, value, &blocker);
	if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
		return;
	}

	state->watch_req = dbwrap_watched_watch_send(
		state->req_state, state->req_state->ev, rec, blocker);
	if (state->watch_req == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
	}
}

static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
{
	NTSTATUS status = g_lock_unlock(s->ctx, s->key);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
	}
	return 0;
}

static void g_lock_lock_retry(struct tevent_req *subreq);

struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    TDB_DATA key,
				    enum g_lock_type type)
{
	struct tevent_req *req;
	struct g_lock_lock_state *state;
	struct g_lock_lock_fn_state fn_state;
	NTSTATUS status;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->key = key;
	state->type = type;

	fn_state = (struct g_lock_lock_fn_state) {
		.req_state = state,
	};

	status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return tevent_req_post(req, ev);
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return tevent_req_post(req, ev);
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return tevent_req_post(req, ev);
	}

	ok = tevent_req_set_endtime(
		fn_state.watch_req,
		state->ev,
		timeval_current_ofs(5 + generate_random() % 5, 0));
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);

	return req;
}

static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct g_lock_lock_fn_state fn_state;
	struct server_id blocker;
	bool blockerdead;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, &blockerdead, &blocker);
	DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
	TALLOC_FREE(subreq);

	if (!NT_STATUS_IS_OK(status) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->retry = true;

	fn_state = (struct g_lock_lock_fn_state) {
		.req_state = state,
		.dead_blocker = blockerdead ? &blocker : NULL,
	};

	status = dbwrap_do_locked(state->ctx->db, state->key,
				  g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return;
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return;
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return;
	}

	if (!tevent_req_set_endtime(
		    fn_state.watch_req, state->ev,
		    timeval_current_ofs(5 + generate_random() % 5, 0))) {
		return;
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
}

NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

struct g_lock_lock_simple_state {
	struct server_id me;
	enum g_lock_type type;
	NTSTATUS status;
};

static void g_lock_lock_simple_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_lock_simple_state *state = private_data;
	struct g_lock lck = { .exclusive.pid = 0 };
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed\n");
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	if (lck.exclusive.pid != 0) {
		goto not_granted;
	}

	if (state->type == G_LOCK_WRITE) {
		if (lck.num_shared != 0) {
			goto not_granted;
		}
		lck.exclusive = state->me;
		state->status = g_lock_store(rec, &lck, NULL);
		return;
	}

	if (state->type == G_LOCK_READ) {
		g_lock_cleanup_shared(&lck);
		state->status = g_lock_store(rec, &lck, &state->me);
		return;
	}

not_granted:
	state->status = NT_STATUS_LOCK_NOT_GRANTED;
}

NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame;
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status;

	if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
		/*
		 * This is an abstraction violation: Normally we do
		 * the sync wrappers around async functions with full
		 * nested event contexts. However, this is used in
		 * very hot code paths, so avoid the event context
		 * creation for the good path where there's no lock
		 * contention. My benchmark gave a factor of 2
		 * improvement for lock/unlock.
		 */
		struct g_lock_lock_simple_state state = {
			.me = messaging_server_id(ctx->msg),
			.type = type,
		};
		status = dbwrap_do_locked(
			ctx->db, key, g_lock_lock_simple_fn, &state);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
				  nt_errstr(status));
			return status;
		}
		if (NT_STATUS_IS_OK(state.status)) {
			return NT_STATUS_OK;
		}
		if (!NT_STATUS_EQUAL(
			    state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
			return state.status;
		}

		/*
		 * Fall back to the full g_lock_trylock logic,
		 * g_lock_lock_simple_fn() called above only covers
		 * the uncontended path.
		 */
	}

	frame = talloc_stackframe();
	status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, key, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}
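
/*
 * Illustrative use of the synchronous wrapper (a sketch, not part of
 * this file; assumes "msg" is an existing messaging_context and that
 * string_term_tdb_data() is used to build the key):
 *
 *	struct g_lock_ctx *ctx = g_lock_ctx_init(talloc_tos(), msg);
 *	TDB_DATA key = string_term_tdb_data("my-lock-name");
 *	NTSTATUS status;
 *
 *	status = g_lock_lock(ctx, key, G_LOCK_WRITE, timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... critical section ...
 *		status = g_lock_unlock(ctx, key);
 *	}
 */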

struct g_lock_unlock_state {
	struct server_id self;
	NTSTATUS status;
};

static void g_lock_unlock_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_unlock_state *state = private_data;
	struct server_id_buf tmp;
	struct g_lock lck;
	size_t i;
	bool ok, exclusive;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse() failed\n");
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	exclusive = server_id_equal(&state->self, &lck.exclusive);

	for (i=0; i<lck.num_shared; i++) {
		struct server_id shared;
		g_lock_get_shared(&lck, i, &shared);
		if (server_id_equal(&state->self, &shared)) {
			break;
		}
	}

	if (i < lck.num_shared) {
		if (exclusive) {
			DBG_DEBUG("%s both exclusive and shared (%zu)\n",
				  server_id_str_buf(state->self, &tmp),
				  i);
			state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
			return;
		}
		g_lock_del_shared(&lck, i);
	} else {
		if (!exclusive) {
			DBG_DEBUG("Lock %s not found, num_rec=%zu\n",
				  server_id_str_buf(state->self, &tmp),
				  lck.num_shared);
			state->status = NT_STATUS_NOT_FOUND;
			return;
		}
		lck.exclusive = (struct server_id) { .pid = 0 };
	}

	if ((lck.exclusive.pid == 0) &&
	    (lck.num_shared == 0) &&
	    (lck.datalen == 0)) {
		state->status = dbwrap_record_delete(rec);
		return;
	}

	state->status = g_lock_store(rec, &lck, NULL);
}

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
{
	struct g_lock_unlock_state state = {
		.self = messaging_server_id(ctx->msg),
	};
	NTSTATUS status;

	status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_WARNING("g_lock_unlock_fn failed: %s\n",
			    nt_errstr(state.status));
		return state.status;
	}

	return NT_STATUS_OK;
}

struct g_lock_write_data_state {
	TDB_DATA key;
	struct server_id self;
	const uint8_t *data;
	size_t datalen;
	NTSTATUS status;
};

static void g_lock_write_data_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_write_data_state *state = private_data;
	struct g_lock lck;
	bool exclusive;
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse for %s failed\n",
			  hex_encode_talloc(talloc_tos(),
					    state->key.dptr,
					    state->key.dsize));
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	exclusive = server_id_equal(&state->self, &lck.exclusive);

	/*
	 * Make sure we're really exclusive. We are marked as
	 * exclusive when we are waiting for an exclusive lock
	 */
	exclusive &= (lck.num_shared == 0);

	if (!exclusive) {
		DBG_DEBUG("Not locked by us\n");
		state->status = NT_STATUS_NOT_LOCKED;
		return;
	}

	lck.data = discard_const_p(uint8_t, state->data);
	lck.datalen = state->datalen;
	state->status = g_lock_store(rec, &lck, NULL);
}

NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
			   const uint8_t *buf, size_t buflen)
{
	struct g_lock_write_data_state state = {
		.key = key, .self = messaging_server_id(ctx->msg),
		.data = buf, .datalen = buflen
	};
	NTSTATUS status;

	status = dbwrap_do_locked(ctx->db, key,
				  g_lock_write_data_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_WARNING("g_lock_write_data_fn failed: %s\n",
			    nt_errstr(state.status));
		return state.status;
	}

	return NT_STATUS_OK;
}
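
/*
 * g_lock_write_data() requires the caller to hold the exclusive lock on
 * "key". The stored user data survives a subsequent g_lock_unlock(),
 * because g_lock_unlock_fn() only deletes the record once it has no
 * holders *and* no data left.
 */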

struct g_lock_locks_state {
	int (*fn)(TDB_DATA key, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	return state->fn(key, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(TDB_DATA key, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}

struct g_lock_dump_state {
	TALLOC_CTX *mem_ctx;
	TDB_DATA key;
	void (*fn)(struct server_id exclusive,
		   size_t num_shared,
		   struct server_id *shared,
		   const uint8_t *data,
		   size_t datalen,
		   void *private_data);
	void *private_data;
	NTSTATUS status;
};

static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct g_lock_dump_state *state = private_data;
	struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
	struct server_id *shared = NULL;
	size_t i;
	bool ok;

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed for %s\n",
			  hex_encode_talloc(talloc_tos(),
					    state->key.dptr,
					    state->key.dsize));
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	shared = talloc_array(
		state->mem_ctx, struct server_id, lck.num_shared);
	if (shared == NULL) {
		DBG_DEBUG("talloc failed\n");
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}

	for (i=0; i<lck.num_shared; i++) {
		g_lock_get_shared(&lck, i, &shared[i]);
	}

	state->fn(lck.exclusive,
		  lck.num_shared,
		  shared,
		  lck.data,
		  lck.datalen,
		  state->private_data);

	TALLOC_FREE(shared);

	state->status = NT_STATUS_OK;
}

NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
		     void (*fn)(struct server_id exclusive,
				size_t num_shared,
				struct server_id *shared,
				const uint8_t *data,
				size_t datalen,
				void *private_data),
		     void *private_data)
{
	struct g_lock_dump_state state = {
		.mem_ctx = ctx, .key = key,
		.fn = fn, .private_data = private_data
	};
	NTSTATUS status;

	status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dbwrap_parse_record returned %s\n",
			  nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_DEBUG("g_lock_dump_fn returned %s\n",
			  nt_errstr(state.status));
		return state.status;
	}
	return NT_STATUS_OK;
}
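
/*
 * Illustrative g_lock_dump() callback (a sketch, not part of this file;
 * the callback name and debug level are arbitrary):
 *
 *	static void print_lock(struct server_id exclusive,
 *			       size_t num_shared,
 *			       struct server_id *shared,
 *			       const uint8_t *data,
 *			       size_t datalen,
 *			       void *private_data)
 *	{
 *		struct server_id_buf buf;
 *		DBG_NOTICE("exclusive=%s num_shared=%zu datalen=%zu\n",
 *			   server_id_str_buf(exclusive, &buf),
 *			   num_shared, datalen);
 *	}
 *
 *	status = g_lock_dump(ctx, key, print_lock, NULL);
 */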