/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "lib/util/debug.h"
#include "lib/util/talloc_stack.h"
#include "lib/util/samba_util.h"
#include "lib/util_path.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"

struct g_lock_ctx {
	struct db_context *db;
	struct messaging_context *msg;
	enum dbwrap_lock_order lock_order;
};

struct g_lock {
	struct server_id exclusive;
	size_t num_shared;
	uint8_t *shared;
	uint64_t unique_data_epoch;
	size_t datalen;
	uint8_t *data;
};
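
/*
 * Record value layout, as read by g_lock_parse() and written by
 * g_lock_store() below:
 *
 *   exclusive holder     SERVER_ID_BUF_LENGTH bytes
 *   unique_data_epoch    8 bytes (uint64_t)
 *   num_shared           4 bytes (uint32_t)
 *   shared holders       num_shared * SERVER_ID_BUF_LENGTH bytes
 *   user data            remaining bytes
 */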

static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
{
	struct server_id exclusive;
	size_t num_shared, shared_len;
	uint64_t unique_data_epoch;

	if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
		      sizeof(uint64_t) +     /* seqnum */
		      sizeof(uint32_t))) {   /* num_shared */
		struct g_lock ret = {
			.exclusive.pid = 0,
			.unique_data_epoch = generate_unique_u64(0),
		};
		*lck = ret;
		return true;
	}

	server_id_get(&exclusive, buf);
	buf += SERVER_ID_BUF_LENGTH;
	buflen -= SERVER_ID_BUF_LENGTH;

	unique_data_epoch = BVAL(buf, 0);
	buf += sizeof(uint64_t);
	buflen -= sizeof(uint64_t);

	num_shared = IVAL(buf, 0);
	buf += sizeof(uint32_t);
	buflen -= sizeof(uint32_t);

	if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
		DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
			  num_shared,
			  buflen);
		return false;
	}

	shared_len = num_shared * SERVER_ID_BUF_LENGTH;

	*lck = (struct g_lock) {
		.exclusive = exclusive,
		.num_shared = num_shared,
		.shared = buf,
		.unique_data_epoch = unique_data_epoch,
		.datalen = buflen - shared_len,
		.data = buf + shared_len,
	};

	return true;
}
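
/*
 * Accessors for the shared-holder array. Note that g_lock_del_shared()
 * removes an entry by overwriting it with the last one, so the order of
 * shared holders is not preserved.
 */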

static void g_lock_get_shared(const struct g_lock *lck,
			      size_t i,
			      struct server_id *shared)
{
	if (i >= lck->num_shared) {
		abort();
	}
	server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
}

static void g_lock_del_shared(struct g_lock *lck, size_t i)
{
	if (i >= lck->num_shared) {
		abort();
	}
	lck->num_shared -= 1;
	if (i < lck->num_shared) {
		memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
		       lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
		       SERVER_ID_BUF_LENGTH);
	}
}
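
/*
 * Serialize a struct g_lock back into the database record. The record is
 * written as a vector of buffers: the exclusive holder, the epoch, the
 * shared-holder count, the existing shared holders, an optional new
 * shared holder, the existing data and any caller-supplied extra data
 * buffers.
 */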

static NTSTATUS g_lock_store(
	struct db_record *rec,
	struct g_lock *lck,
	struct server_id *new_shared,
	const TDB_DATA *new_dbufs,
	size_t num_new_dbufs)
{
	uint8_t exclusive[SERVER_ID_BUF_LENGTH];
	uint8_t seqnum_buf[sizeof(uint64_t)];
	uint8_t sizebuf[sizeof(uint32_t)];
	uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];

	struct TDB_DATA dbufs[6 + num_new_dbufs];

	dbufs[0] = (TDB_DATA) {
		.dptr = exclusive, .dsize = sizeof(exclusive),
	};
	dbufs[1] = (TDB_DATA) {
		.dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
	};
	dbufs[2] = (TDB_DATA) {
		.dptr = sizebuf, .dsize = sizeof(sizebuf),
	};
	dbufs[3] = (TDB_DATA) {
		.dptr = lck->shared,
		.dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
	};
	dbufs[4] = (TDB_DATA) { 0 };
	dbufs[5] = (TDB_DATA) {
		.dptr = lck->data, .dsize = lck->datalen,
	};

	if (num_new_dbufs != 0) {
		memcpy(&dbufs[6],
		       new_dbufs,
		       num_new_dbufs * sizeof(TDB_DATA));
	}

	server_id_put(exclusive, lck->exclusive);
	SBVAL(seqnum_buf, 0, lck->unique_data_epoch);

	if (new_shared != NULL) {
		if (lck->num_shared >= UINT32_MAX) {
			return NT_STATUS_BUFFER_OVERFLOW;
		}

		server_id_put(new_shared_buf, *new_shared);

		dbufs[4] = (TDB_DATA) {
			.dptr = new_shared_buf,
			.dsize = sizeof(new_shared_buf),
		};

		lck->num_shared += 1;
	}

	SIVAL(sizebuf, 0, lck->num_shared);

	return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
}
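
/*
 * Minimal usage sketch for the public API below (illustrative only; the
 * messaging context setup, error handling and the timeval_set() helper
 * are assumed to be available in the caller):
 *
 *	struct g_lock_ctx *ctx = g_lock_ctx_init(mem_ctx, msg_ctx);
 *	TDB_DATA key = {
 *		.dptr = discard_const_p(uint8_t, "mykey"), .dsize = 5,
 *	};
 *	NTSTATUS status = g_lock_lock(ctx, key, G_LOCK_WRITE,
 *				      timeval_set(10, 0));
 *	if (NT_STATUS_IS_OK(status)) {
 *		... critical section, optionally g_lock_write_data() ...
 *		g_lock_unlock(ctx, key);
 *	}
 */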

struct g_lock_ctx *g_lock_ctx_init_backend(
	TALLOC_CTX *mem_ctx,
	struct messaging_context *msg,
	struct db_context **backend)
{
	struct g_lock_ctx *result;

	result = talloc(mem_ctx, struct g_lock_ctx);
	if (result == NULL) {
		return NULL;
	}
	result->msg = msg;
	result->lock_order = DBWRAP_LOCK_ORDER_NONE;

	result->db = db_open_watched(result, backend, msg);
	if (result->db == NULL) {
		DBG_WARNING("db_open_watched failed\n");
		TALLOC_FREE(result);
		return NULL;
	}
	return result;
}

void g_lock_set_lock_order(struct g_lock_ctx *ctx,
			   enum dbwrap_lock_order lock_order)
{
	ctx->lock_order = lock_order;
}

struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg)
{
	char *db_path = NULL;
	struct db_context *backend = NULL;
	struct g_lock_ctx *ctx = NULL;

	db_path = lock_path(mem_ctx, "g_lock.tdb");
	if (db_path == NULL) {
		return NULL;
	}

	backend = db_open(
		mem_ctx,
		db_path,
		0,
		TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
		O_RDWR|O_CREAT,
		0600,
		DBWRAP_LOCK_ORDER_3,
		DBWRAP_FLAG_NONE);
	TALLOC_FREE(db_path);
	if (backend == NULL) {
		DBG_WARNING("Could not open g_lock.tdb\n");
		return NULL;
	}

	ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
	return ctx;
}
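
/*
 * If the lock watcher told us that a blocker died, remove that process
 * from the record: clear it as the exclusive holder and/or drop it from
 * the front of the shared-holder list, then store the updated record.
 */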

static NTSTATUS g_lock_cleanup_dead(
	struct db_record *rec,
	struct g_lock *lck,
	struct server_id *dead_blocker)
{
	bool modified = false;
	bool exclusive_died;
	NTSTATUS status = NT_STATUS_OK;
	struct server_id_buf tmp;

	if (dead_blocker == NULL) {
		return NT_STATUS_OK;
	}

	exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);

	if (exclusive_died) {
		DBG_DEBUG("Exclusive holder %s died\n",
			  server_id_str_buf(lck->exclusive, &tmp));
		lck->exclusive.pid = 0;
		modified = true;
	}

	if (lck->num_shared != 0) {
		bool shared_died;
		struct server_id shared;

		g_lock_get_shared(lck, 0, &shared);
		shared_died = server_id_equal(dead_blocker, &shared);

		if (shared_died) {
			DBG_DEBUG("Shared holder %s died\n",
				  server_id_str_buf(shared, &tmp));
			g_lock_del_shared(lck, 0);
			modified = true;
		}
	}

	if (modified) {
		status = g_lock_store(rec, lck, NULL, NULL, 0);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
		}
	}

	return status;
}

static ssize_t g_lock_find_shared(
	struct g_lock *lck,
	const struct server_id *self)
{
	size_t i;

	for (i=0; i<lck->num_shared; i++) {
		struct server_id shared;
		bool same;

		g_lock_get_shared(lck, i, &shared);

		same = server_id_equal(self, &shared);
		if (same) {
			return i;
		}
	}

	return -1;
}

static void g_lock_cleanup_shared(struct g_lock *lck)
{
	size_t i;
	struct server_id check;
	bool exists;

	if (lck->num_shared == 0) {
		return;
	}

	/*
	 * Read locks can stay around forever if the process dies. Do
	 * a heuristic check for process existence: Check one random
	 * process for existence. Hopefully this will keep runaway
	 * read locks under control.
	 */
	i = generate_random() % lck->num_shared;
	g_lock_get_shared(lck, i, &check);

	exists = serverid_exists(&check);
	if (!exists) {
		struct server_id_buf tmp;
		DBG_DEBUG("Shared locker %s died -- removing\n",
			  server_id_str_buf(check, &tmp));
		g_lock_del_shared(lck, i);
	}
}
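
/*
 * Async locking machinery: g_lock_lock_send() runs g_lock_trylock() under
 * dbwrap_do_locked(). If the lock cannot be granted, g_lock_trylock()
 * names a blocker, a watch is armed on the record via
 * dbwrap_watched_watch_send(), and g_lock_lock_retry() re-runs the
 * attempt once the record changes or the blocker dies.
 */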

struct g_lock_lock_state {
	struct tevent_context *ev;
	struct g_lock_ctx *ctx;
	TDB_DATA key;
	enum g_lock_type type;
	bool retry;
};

struct g_lock_lock_fn_state {
	struct g_lock_lock_state *req_state;
	struct server_id *dead_blocker;

	struct tevent_req *watch_req;
	NTSTATUS status;
};

static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);

static NTSTATUS g_lock_trylock(
	struct db_record *rec,
	struct g_lock_lock_fn_state *state,
	TDB_DATA data,
	struct server_id *blocker)
{
	struct g_lock_lock_state *req_state = state->req_state;
	struct server_id self = messaging_server_id(req_state->ctx->msg);
	enum g_lock_type type = req_state->type;
	bool retry = req_state->retry;
	struct g_lock lck = { .exclusive.pid = 0 };
	struct server_id_buf tmp;
	NTSTATUS status;
	bool ok;

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed\n");
		return NT_STATUS_INTERNAL_DB_CORRUPTION;
	}

	status = g_lock_cleanup_dead(rec, &lck, state->dead_blocker);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_cleanup_dead() failed: %s\n",
			  nt_errstr(status));
		return status;
	}

	if (lck.exclusive.pid != 0) {
		bool self_exclusive = server_id_equal(&self, &lck.exclusive);

		if (!self_exclusive) {
			bool exists = serverid_exists(&lck.exclusive);
			if (!exists) {
				lck.exclusive = (struct server_id) { .pid = 0 };
				goto noexclusive;
			}

			DBG_DEBUG("%s has an exclusive lock\n",
				  server_id_str_buf(lck.exclusive, &tmp));

			if (type == G_LOCK_DOWNGRADE) {
				struct server_id_buf tmp2;
				DBG_DEBUG("%s: Trying to downgrade %s\n",
					  server_id_str_buf(self, &tmp),
					  server_id_str_buf(
						  lck.exclusive, &tmp2));
				return NT_STATUS_NOT_LOCKED;
			}

			if (type == G_LOCK_UPGRADE) {
				ssize_t shared_idx;
				shared_idx = g_lock_find_shared(&lck, &self);

				if (shared_idx == -1) {
					DBG_DEBUG("Trying to upgrade %s "
						  "without "
						  "existing shared lock\n",
						  server_id_str_buf(
							  self, &tmp));
					return NT_STATUS_NOT_LOCKED;
				}

				/*
				 * We're trying to upgrade, and the
				 * exclusive lock is taken by someone
				 * else. This means that someone else
				 * is waiting for us to give up our
				 * shared lock. If we now also wait
				 * for someone to give their shared
				 * lock, we will deadlock.
				 */
				DBG_DEBUG("Trying to upgrade %s while "
					  "someone else is also "
					  "trying to upgrade\n",
					  server_id_str_buf(self, &tmp));
				return NT_STATUS_POSSIBLE_DEADLOCK;
			}

			DBG_DEBUG("Waiting for lck.exclusive=%s\n",
				  server_id_str_buf(lck.exclusive, &tmp));

			*blocker = lck.exclusive;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		if (type == G_LOCK_DOWNGRADE) {
			DBG_DEBUG("Downgrading %s from WRITE to READ\n",
				  server_id_str_buf(self, &tmp));

			lck.exclusive = (struct server_id) { .pid = 0 };
			goto do_shared;
		}

		if (!retry) {
			DBG_DEBUG("%s already locked by self\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_WAS_LOCKED;
		}

		if (lck.num_shared != 0) {
			g_lock_get_shared(&lck, 0, blocker);

			DBG_DEBUG("Continue waiting for shared lock %s\n",
				  server_id_str_buf(*blocker, &tmp));
			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		talloc_set_destructor(req_state, NULL);

		/*
		 * Retry after a conflicting lock was released
		 */
		return NT_STATUS_OK;
	}

noexclusive:

	if (type == G_LOCK_UPGRADE) {
		ssize_t shared_idx = g_lock_find_shared(&lck, &self);

		if (shared_idx == -1) {
			DBG_DEBUG("Trying to upgrade %s without "
				  "existing shared lock\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_NOT_LOCKED;
		}

		g_lock_del_shared(&lck, shared_idx);
		type = G_LOCK_WRITE;
	}

	if (type == G_LOCK_WRITE) {
		ssize_t shared_idx = g_lock_find_shared(&lck, &self);

		if (shared_idx != -1) {
			DBG_DEBUG("Trying to writelock existing shared %s\n",
				  server_id_str_buf(self, &tmp));
			return NT_STATUS_WAS_LOCKED;
		}

		lck.exclusive = self;

		status = g_lock_store(rec, &lck, NULL, NULL, 0);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
			return status;
		}

		if (lck.num_shared != 0) {
			talloc_set_destructor(
				req_state, g_lock_lock_state_destructor);

			g_lock_get_shared(&lck, 0, blocker);

			DBG_DEBUG("Waiting for %zu shared locks, "
				  "picking blocker %s\n",
				  lck.num_shared,
				  server_id_str_buf(*blocker, &tmp));

			return NT_STATUS_LOCK_NOT_GRANTED;
		}

		talloc_set_destructor(req_state, NULL);

		return NT_STATUS_OK;
	}

do_shared:

	if (lck.num_shared == 0) {
		status = g_lock_store(rec, &lck, &self, NULL, 0);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("g_lock_store() failed: %s\n",
				  nt_errstr(status));
		}

		return status;
	}

	g_lock_cleanup_shared(&lck);

	status = g_lock_store(rec, &lck, &self, NULL, 0);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_store() failed: %s\n",
			  nt_errstr(status));
		return status;
	}

	return NT_STATUS_OK;
}

static void g_lock_lock_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_lock_fn_state *state = private_data;
	struct server_id blocker = {0};

	state->status = g_lock_trylock(rec, state, value, &blocker);
	if (!NT_STATUS_IS_OK(state->status)) {
		DBG_DEBUG("g_lock_trylock returned %s\n",
			  nt_errstr(state->status));
	}

	if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
		return;
	}

	state->watch_req = dbwrap_watched_watch_send(
		state->req_state, state->req_state->ev, rec, blocker);
	if (state->watch_req == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
	}
}

static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
{
	NTSTATUS status = g_lock_unlock(s->ctx, s->key);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
	}
	return 0;
}

static void g_lock_lock_retry(struct tevent_req *subreq);

struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    TDB_DATA key,
				    enum g_lock_type type)
{
	struct tevent_req *req;
	struct g_lock_lock_state *state;
	struct g_lock_lock_fn_state fn_state;
	NTSTATUS status;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->key = key;
	state->type = type;

	fn_state = (struct g_lock_lock_fn_state) {
		.req_state = state,
	};

	status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return tevent_req_post(req, ev);
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return tevent_req_post(req, ev);
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return tevent_req_post(req, ev);
	}

	ok = tevent_req_set_endtime(
		fn_state.watch_req,
		state->ev,
		timeval_current_ofs(5 + generate_random() % 5, 0));
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);

	return req;
}

static void g_lock_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct g_lock_lock_fn_state fn_state;
	struct server_id blocker = { .pid = 0 };
	bool blockerdead = false;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, &blockerdead, &blocker);
	DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
	TALLOC_FREE(subreq);

	if (!NT_STATUS_IS_OK(status) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->retry = true;

	fn_state = (struct g_lock_lock_fn_state) {
		.req_state = state,
		.dead_blocker = blockerdead ? &blocker : NULL,
	};

	status = dbwrap_do_locked(state->ctx->db, state->key,
				  g_lock_lock_fn, &fn_state);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked failed: %s\n",
			  nt_errstr(status));
		return;
	}

	if (NT_STATUS_IS_OK(fn_state.status)) {
		tevent_req_done(req);
		return;
	}
	if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
		tevent_req_nterror(req, fn_state.status);
		return;
	}

	if (tevent_req_nomem(fn_state.watch_req, req)) {
		return;
	}

	if (!tevent_req_set_endtime(
		    fn_state.watch_req, state->ev,
		    timeval_current_ofs(5 + generate_random() % 5, 0))) {
		return;
	}
	tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
}

NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
	struct g_lock_lock_state *state = tevent_req_data(
		req, struct g_lock_lock_state);
	struct g_lock_ctx *ctx = state->ctx;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	if ((ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) &&
	    ((state->type == G_LOCK_READ) ||
	     (state->type == G_LOCK_WRITE))) {
		const char *name = dbwrap_name(ctx->db);
		dbwrap_lock_order_lock(name, ctx->lock_order);
	}

	return NT_STATUS_OK;
}
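
/*
 * Fast path for g_lock_lock(): try to grab the lock without setting up a
 * tevent context, handling only the uncontended case. Contended locks
 * fall back to the full async machinery above.
 */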
struct g_lock_lock_simple_state {
	struct server_id me;
	enum g_lock_type type;
	NTSTATUS status;
};

static void g_lock_lock_simple_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_lock_simple_state *state = private_data;
	struct server_id_buf buf;
	struct g_lock lck = { .exclusive.pid = 0 };
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed\n");
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	if (lck.exclusive.pid != 0) {
		DBG_DEBUG("locked by %s\n",
			  server_id_str_buf(lck.exclusive, &buf));
		goto not_granted;
	}

	if (state->type == G_LOCK_WRITE) {
		if (lck.num_shared != 0) {
			DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
			goto not_granted;
		}
		lck.exclusive = state->me;
		state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
		return;
	}

	if (state->type == G_LOCK_READ) {
		g_lock_cleanup_shared(&lck);
		state->status = g_lock_store(rec, &lck, &state->me, NULL, 0);
		return;
	}

not_granted:
	state->status = NT_STATUS_LOCK_NOT_GRANTED;
}

NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
		     enum g_lock_type type, struct timeval timeout)
{
	TALLOC_CTX *frame;
	struct tevent_context *ev;
	struct tevent_req *req;
	struct timeval end;
	NTSTATUS status;

	if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
		/*
		 * This is an abstraction violation: Normally we do
		 * the sync wrappers around async functions with full
		 * nested event contexts. However, this is used in
		 * very hot code paths, so avoid the event context
		 * creation for the good path where there's no lock
		 * contention. My benchmark gave a factor of 2
		 * improvement for lock/unlock.
		 */
		struct g_lock_lock_simple_state state = {
			.me = messaging_server_id(ctx->msg),
			.type = type,
		};
		status = dbwrap_do_locked(
			ctx->db, key, g_lock_lock_simple_fn, &state);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
				  nt_errstr(status));
			return status;
		}

		DBG_DEBUG("status=%s, state.status=%s\n",
			  nt_errstr(status),
			  nt_errstr(state.status));

		if (NT_STATUS_IS_OK(state.status)) {
			if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
				const char *name = dbwrap_name(ctx->db);
				dbwrap_lock_order_lock(name, ctx->lock_order);
			}
			return NT_STATUS_OK;
		}
		if (!NT_STATUS_EQUAL(
			    state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
			return state.status;
		}

		/*
		 * Fall back to the full g_lock_trylock logic,
		 * g_lock_lock_simple_fn() called above only covers
		 * the uncontended path.
		 */
	}

	frame = talloc_stackframe();
	status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = g_lock_lock_send(frame, ev, ctx, key, type);
	if (req == NULL) {
		goto fail;
	}
	end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
	if (!tevent_req_set_endtime(req, ev, end)) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	status = g_lock_lock_recv(req);
fail:
	TALLOC_FREE(frame);
	return status;
}

struct g_lock_unlock_state {
	struct server_id self;
	NTSTATUS status;
};

static void g_lock_unlock_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_unlock_state *state = private_data;
	struct server_id_buf tmp1, tmp2;
	struct g_lock lck;
	size_t i;
	bool ok, exclusive;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse() failed\n");
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	exclusive = server_id_equal(&state->self, &lck.exclusive);

	for (i=0; i<lck.num_shared; i++) {
		struct server_id shared;
		g_lock_get_shared(&lck, i, &shared);
		if (server_id_equal(&state->self, &shared)) {
			break;
		}
	}

	if (i < lck.num_shared) {
		if (exclusive) {
			DBG_DEBUG("%s both exclusive and shared (%zu)\n",
				  server_id_str_buf(state->self, &tmp1),
				  i);
			state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
			return;
		}
		g_lock_del_shared(&lck, i);
	} else {
		if (!exclusive) {
			DBG_DEBUG("Lock not found, self=%s, lck.exclusive=%s, "
				  "num_shared=%zu\n",
				  server_id_str_buf(state->self, &tmp1),
				  server_id_str_buf(lck.exclusive, &tmp2),
				  lck.num_shared);
			state->status = NT_STATUS_NOT_FOUND;
			return;
		}
		lck.exclusive = (struct server_id) { .pid = 0 };
	}

	if ((lck.exclusive.pid == 0) &&
	    (lck.num_shared == 0) &&
	    (lck.datalen == 0)) {
		state->status = dbwrap_record_delete(rec);
		return;
	}

	state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
}

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
{
	struct g_lock_unlock_state state = {
		.self = messaging_server_id(ctx->msg),
	};
	NTSTATUS status;

	status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_WARNING("g_lock_unlock_fn failed: %s\n",
			    nt_errstr(state.status));
		return state.status;
	}

	if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
		const char *name = dbwrap_name(ctx->db);
		dbwrap_lock_order_unlock(name, ctx->lock_order);
	}

	return NT_STATUS_OK;
}
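
/*
 * Replace the user data stored with a lock. The caller must hold the
 * record exclusively; unique_data_epoch is bumped so that
 * g_lock_watch_data_send() waiters notice the change.
 */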

struct g_lock_writev_data_state {
	TDB_DATA key;
	struct server_id self;
	const TDB_DATA *dbufs;
	size_t num_dbufs;
	NTSTATUS status;
};

static void g_lock_writev_data_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock_writev_data_state *state = private_data;
	struct g_lock lck;
	bool exclusive;
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse for %s failed\n",
			  hex_encode_talloc(talloc_tos(),
					    state->key.dptr,
					    state->key.dsize));
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	exclusive = server_id_equal(&state->self, &lck.exclusive);

	/*
	 * Make sure we're really exclusive. We are marked as
	 * exclusive when we are waiting for an exclusive lock
	 */
	exclusive &= (lck.num_shared == 0);

	if (!exclusive) {
		struct server_id_buf buf1, buf2;
		DBG_DEBUG("Not locked by us: self=%s, lck.exclusive=%s, "
			  "lck.num_shared=%zu\n",
			  server_id_str_buf(state->self, &buf1),
			  server_id_str_buf(lck.exclusive, &buf2),
			  lck.num_shared);
		state->status = NT_STATUS_NOT_LOCKED;
		return;
	}

	lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
	lck.data = NULL;
	lck.datalen = 0;
	state->status = g_lock_store(
		rec, &lck, NULL, state->dbufs, state->num_dbufs);
}

NTSTATUS g_lock_writev_data(
	struct g_lock_ctx *ctx,
	TDB_DATA key,
	const TDB_DATA *dbufs,
	size_t num_dbufs)
{
	struct g_lock_writev_data_state state = {
		.key = key,
		.self = messaging_server_id(ctx->msg),
		.dbufs = dbufs,
		.num_dbufs = num_dbufs,
	};
	NTSTATUS status;

	status = dbwrap_do_locked(
		ctx->db, key, g_lock_writev_data_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("dbwrap_do_locked failed: %s\n",
			    nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_WARNING("g_lock_writev_data_fn failed: %s\n",
			    nt_errstr(state.status));
		return state.status;
	}

	return NT_STATUS_OK;
}

NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
			   const uint8_t *buf, size_t buflen)
{
	TDB_DATA dbuf = {
		.dptr = discard_const_p(uint8_t, buf),
		.dsize = buflen,
	};
	return g_lock_writev_data(ctx, key, &dbuf, 1);
}

struct g_lock_locks_state {
	int (*fn)(TDB_DATA key, void *private_data);
	void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
	TDB_DATA key;
	struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

	key = dbwrap_record_get_key(rec);
	return state->fn(key, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(TDB_DATA key, void *private_data),
		 void *private_data)
{
	struct g_lock_locks_state state;
	NTSTATUS status;
	int count;

	state.fn = fn;
	state.private_data = private_data;

	status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}
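
/*
 * Parse a lock record without taking it: hand the exclusive holder, the
 * shared holders and the stored data to a caller-provided callback.
 */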

struct g_lock_dump_state {
	TALLOC_CTX *mem_ctx;
	TDB_DATA key;
	void (*fn)(struct server_id exclusive,
		   size_t num_shared,
		   struct server_id *shared,
		   const uint8_t *data,
		   size_t datalen,
		   void *private_data);
	void *private_data;
	NTSTATUS status;
	enum dbwrap_req_state req_state;
};

static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct g_lock_dump_state *state = private_data;
	struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
	struct server_id *shared = NULL;
	size_t i;
	bool ok;

	ok = g_lock_parse(data.dptr, data.dsize, &lck);
	if (!ok) {
		DBG_DEBUG("g_lock_parse failed for %s\n",
			  hex_encode_talloc(talloc_tos(),
					    state->key.dptr,
					    state->key.dsize));
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	shared = talloc_array(
		state->mem_ctx, struct server_id, lck.num_shared);
	if (shared == NULL) {
		DBG_DEBUG("talloc failed\n");
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}

	for (i=0; i<lck.num_shared; i++) {
		g_lock_get_shared(&lck, i, &shared[i]);
	}

	state->fn(lck.exclusive,
		  lck.num_shared,
		  shared,
		  lck.data,
		  lck.datalen,
		  state->private_data);

	TALLOC_FREE(shared);

	state->status = NT_STATUS_OK;
}

NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
		     void (*fn)(struct server_id exclusive,
				size_t num_shared,
				struct server_id *shared,
				const uint8_t *data,
				size_t datalen,
				void *private_data),
		     void *private_data)
{
	struct g_lock_dump_state state = {
		.mem_ctx = ctx, .key = key,
		.fn = fn, .private_data = private_data
	};
	NTSTATUS status;

	status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dbwrap_parse_record returned %s\n",
			  nt_errstr(status));
		return status;
	}
	if (!NT_STATUS_IS_OK(state.status)) {
		DBG_DEBUG("g_lock_dump_fn returned %s\n",
			  nt_errstr(state.status));
		return state.status;
	}
	return NT_STATUS_OK;
}

static void g_lock_dump_done(struct tevent_req *subreq);

struct tevent_req *g_lock_dump_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct g_lock_ctx *ctx,
	TDB_DATA key,
	void (*fn)(struct server_id exclusive,
		   size_t num_shared,
		   struct server_id *shared,
		   const uint8_t *data,
		   size_t datalen,
		   void *private_data),
	void *private_data)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct g_lock_dump_state *state = NULL;

	req = tevent_req_create(mem_ctx, &state, struct g_lock_dump_state);
	if (req == NULL) {
		return NULL;
	}
	state->mem_ctx = state;
	state->key = key;
	state->fn = fn;
	state->private_data = private_data;

	subreq = dbwrap_parse_record_send(
		state,
		ev,
		ctx->db,
		key,
		g_lock_dump_fn,
		state,
		&state->req_state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, g_lock_dump_done, req);
	return req;
}

static void g_lock_dump_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_dump_state *state = tevent_req_data(
		req, struct g_lock_dump_state);
	NTSTATUS status;

	status = dbwrap_parse_record_recv(subreq);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status) ||
	    tevent_req_nterror(req, state->status)) {
		return;
	}
	tevent_req_done(req);
}

NTSTATUS g_lock_dump_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

int g_lock_seqnum(struct g_lock_ctx *ctx)
{
	return dbwrap_get_seqnum(ctx->db);
}
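
/*
 * Watch for changes to the data stored with a key: the current
 * unique_data_epoch is recorded and a watch is armed on the record; the
 * request completes once a different epoch is observed.
 */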
2019-10-30 16:12:11 +01:00
struct g_lock_watch_data_state {
struct tevent_context * ev ;
struct g_lock_ctx * ctx ;
TDB_DATA key ;
struct server_id blocker ;
bool blockerdead ;
s3:g_lock: avoid very expensive generate_random_buffer() in g_lock_parse()
We don't require a sequence number that is incremented,
we just need a value that's not reused.
We use the new generate_unique_u64(), which is much cheaper!
Using smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 500000
under valgrind --tool=callgrind...
This change replaces this:
13,129,925,659 PROGRAM TOTALS
4,125,752,958 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
1,257,005,866 ???:_nettle_aes_encrypt [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
571,503,429 ???:_nettle_aes_set_key [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
by this:
6,877,826,377 PROGRAM TOTALS
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
12,500,033 bin/default/../../lib/util/genrand_util.c:generate_unique_u64
...
8,996,970 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
time smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 5000000
gives:
537426 locks/sec
real 0m19,071s
user 0m15,061s
sys 0m3,999s
vs.
900956 locks/sec
real 0m11,155s
user 0m8,293s
sys 0m2,860s
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Volker Lendecke <vl@samba.org>
2020-05-19 02:58:23 +02:00
uint64_t unique_data_epoch ;
2019-10-30 16:12:11 +01:00
NTSTATUS status ;
} ;
static void g_lock_watch_data_done ( struct tevent_req * subreq ) ;
static void g_lock_watch_data_send_fn (
struct db_record * rec ,
TDB_DATA value ,
void * private_data )
{
struct tevent_req * req = talloc_get_type_abort (
private_data , struct tevent_req ) ;
struct g_lock_watch_data_state * state = tevent_req_data (
req , struct g_lock_watch_data_state ) ;
struct tevent_req * subreq = NULL ;
struct g_lock lck ;
bool ok ;
ok = g_lock_parse ( value . dptr , value . dsize , & lck ) ;
if ( ! ok ) {
state - > status = NT_STATUS_INTERNAL_DB_CORRUPTION ;
return ;
}
s3:g_lock: avoid very expensive generate_random_buffer() in g_lock_parse()
We don't require a sequence number that is incremented,
we just need a value that's not reused.
We use the new generate_unique_u64(), which is much cheaper!
Using smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 500000
under valgrind --tool=callgrind...
This change replaces this:
13,129,925,659 PROGRAM TOTALS
4,125,752,958 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
1,257,005,866 ???:_nettle_aes_encrypt [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
571,503,429 ???:_nettle_aes_set_key [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
by this:
6,877,826,377 PROGRAM TOTALS
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
12,500,033 bin/default/../../lib/util/genrand_util.c:generate_unique_u64
...
8,996,970 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
time smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 5000000
gives:
537426 locks/sec
real 0m19,071s
user 0m15,061s
sys 0m3,999s
vs.
900956 locks/sec
real 0m11,155s
user 0m8,293s
sys 0m2,860s
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Volker Lendecke <vl@samba.org>
2020-05-19 02:58:23 +02:00
state - > unique_data_epoch = lck . unique_data_epoch ;
2019-10-30 16:12:11 +01:00
s3:g_lock: avoid very expensive generate_random_buffer() in g_lock_parse()
We don't require a sequence number that is incremented,
we just need a value that's not reused.
We use the new generate_unique_u64(), which is much cheaper!
Using smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 500000
under valgrind --tool=callgrind...
This change replaces this:
13,129,925,659 PROGRAM TOTALS
4,125,752,958 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
1,257,005,866 ???:_nettle_aes_encrypt [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
571,503,429 ???:_nettle_aes_set_key [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
by this:
6,877,826,377 PROGRAM TOTALS
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
12,500,033 bin/default/../../lib/util/genrand_util.c:generate_unique_u64
...
8,996,970 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
time smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 5000000
gives:
537426 locks/sec
real 0m19,071s
user 0m15,061s
sys 0m3,999s
vs.
900956 locks/sec
real 0m11,155s
user 0m8,293s
sys 0m2,860s
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Volker Lendecke <vl@samba.org>
2020-05-19 02:58:23 +02:00
DBG_DEBUG ( " state->unique_data_epoch=% " PRIu64 " \n " , state - > unique_data_epoch ) ;
2019-10-30 16:12:11 +01:00

	subreq = dbwrap_watched_watch_send(
		state, state->ev, rec, state->blocker);
	if (subreq == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}
	tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
	state->status = NT_STATUS_EVENT_PENDING;
}
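
/*
 * Async request to watch the user data of a g_lock record: take a
 * snapshot of unique_data_epoch under the record lock, then wait via
 * dbwrap_watched_watch_send() until the data changes or "blocker"
 * exits. NT_STATUS_EVENT_PENDING in state->status is only an internal
 * "keep waiting" marker; it is never reported to the caller.
 *
 * A minimal caller sketch (hypothetical names, assuming a running
 * tevent loop and an existing g_lock_ctx):
 *
 *	subreq = g_lock_watch_data_send(mem_ctx, ev, ctx, key, blocker);
 *	if (subreq == NULL) { ... }
 *	tevent_req_set_callback(subreq, my_watch_done, my_state);
 *
 *	static void my_watch_done(struct tevent_req *subreq)
 *	{
 *		bool blockerdead;
 *		struct server_id blocker;
 *		NTSTATUS status = g_lock_watch_data_recv(
 *			subreq, &blockerdead, &blocker);
 *		TALLOC_FREE(subreq);
 *		...
 *	}
 */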
struct tevent_req *g_lock_watch_data_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct g_lock_ctx *ctx,
	TDB_DATA key,
	struct server_id blocker)
{
	struct tevent_req *req = NULL;
	struct g_lock_watch_data_state *state = NULL;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct g_lock_watch_data_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->ctx = ctx;
	state->blocker = blocker;

	state->key = tdb_data_talloc_copy(state, key);
	if (tevent_req_nomem(state->key.dptr, req)) {
		return tevent_req_post(req, ev);
	}

	status = dbwrap_do_locked(
		ctx->db, key, g_lock_watch_data_send_fn, req);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
		return tevent_req_post(req, ev);
	}

	if (NT_STATUS_IS_OK(state->status)) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;
}
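
/*
 * Runs under the record lock after the watch fired: re-parse the
 * record and compare unique_data_epoch against the snapshot taken in
 * g_lock_watch_data_send_fn(). If it changed, the request is done;
 * otherwise re-arm the watch and keep waiting.
 */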
static void g_lock_watch_data_done_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	struct g_lock_watch_data_state *state = tevent_req_data(
		req, struct g_lock_watch_data_state);
	struct tevent_req *subreq = NULL;
	struct g_lock lck;
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
		return;
	}

	if (lck.unique_data_epoch != state->unique_data_epoch) {
		DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
			  "state->unique_data_epoch=%"PRIu64"\n",
			  lck.unique_data_epoch,
			  state->unique_data_epoch);
		state->status = NT_STATUS_OK;
		return;
	}

	subreq = dbwrap_watched_watch_send(
		state, state->ev, rec, state->blocker);
	if (subreq == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}
	tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
	state->status = NT_STATUS_EVENT_PENDING;
}
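
/*
 * The dbwrap watch fired (or the blocker died). Pick up the result,
 * then re-inspect the record under the lock via dbwrap_do_locked();
 * g_lock_watch_data_done_fn() decides whether to complete the request
 * or to keep waiting (NT_STATUS_EVENT_PENDING).
 */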
static void g_lock_watch_data_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct g_lock_watch_data_state *state = tevent_req_data(
		req, struct g_lock_watch_data_state);
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(
		subreq, &state->blockerdead, &state->blocker);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
			  nt_errstr(status));
		return;
	}

	status = dbwrap_do_locked(
		state->ctx->db, state->key, g_lock_watch_data_done_fn, req);
	if (tevent_req_nterror(req, status)) {
		DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
		return;
	}
	if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
		return;
	}
	if (tevent_req_nterror(req, state->status)) {
		return;
	}
	tevent_req_done(req);
}
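
/*
 * Collect the result of g_lock_watch_data_send(). On success the
 * optional out parameters report whether the blocker died and the
 * server_id passed back by dbwrap_watched_watch_recv().
 */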
NTSTATUS g_lock_watch_data_recv(
	struct tevent_req *req,
	bool *blockerdead,
	struct server_id *blocker)
{
	struct g_lock_watch_data_state *state = tevent_req_data(
		req, struct g_lock_watch_data_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (blockerdead != NULL) {
		*blockerdead = state->blockerdead;
	}
	if (blocker != NULL) {
		*blocker = state->blocker;
	}

	return NT_STATUS_OK;
}
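
/*
 * Runs under the record lock: parse the record, move
 * unique_data_epoch to a fresh value via generate_unique_u64() and
 * store the record again, so that pending data watchers observe a
 * change and wake up.
 */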
static void g_lock_wake_watchers_fn(
	struct db_record *rec,
	TDB_DATA value,
	void *private_data)
{
	struct g_lock lck = { .exclusive.pid = 0 };
	NTSTATUS status;
	bool ok;

	ok = g_lock_parse(value.dptr, value.dsize, &lck);
	if (!ok) {
		DBG_WARNING("g_lock_parse failed\n");
		return;
	}

	lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);

	status = g_lock_store(rec, &lck, NULL, NULL, 0);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(status));
		return;
	}
}
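
/*
 * Wake up everybody waiting in g_lock_watch_data_send() on this key
 * by running g_lock_wake_watchers_fn() under the record lock. Errors
 * are only logged; the void return gives no way to report them.
 */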
void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
{
	NTSTATUS status;

	status = dbwrap_do_locked(ctx->db, key, g_lock_wake_watchers_fn, NULL);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dbwrap_do_locked returned %s\n",
			  nt_errstr(status));
	}
}