
LACOUNT: Add back lacount mechanism to defer migrating a fetched/read copy until after a default of 20 consecutive requests from the same node

This can improve performance slightly on certain workloads where smbds frequently read from the same record.

(This used to be ctdb commit 035c0d981bde8c0eee8b3f24ba8e2dc817e5b504)
Ronnie Sahlberg 2012-03-20 12:20:55 +11:00
parent 6a493a0b08
commit e7e51ddb64
6 changed files with 30 additions and 14 deletions
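For orientation, here is a minimal self-contained sketch of the mechanism this patch introduces. The field names (laccessor, lacount) and the deferral condition follow the hunks below, but the struct, helper names and simplified types are illustrative stand-ins, not the real ctdb definitions:

    #include <stdint.h>
    #include <stdbool.h>

    /* illustrative stand-in for the two new fields in the record header */
    struct la_state {
            uint16_t laccessor;   /* node that made the most recent access */
            uint16_t lacount;     /* consecutive accesses by that node */
    };

    /* run on the current dmaster for every call on the record */
    static void la_note_access(struct la_state *la, uint32_t caller)
    {
            if (la->laccessor != caller) {
                    la->lacount = 0;   /* a different node breaks the streak */
            }
            la->laccessor = caller;
            la->lacount++;
    }

    /* decide whether to hand the record (dmaster role) to the requesting node */
    static bool la_should_migrate(const struct la_state *la, uint32_t caller,
                                  uint32_t own_pnn, uint32_t max_lacount,
                                  bool immediate_migration)
    {
            if (caller == own_pnn) {
                    return false;      /* record is already local */
            }
            if (immediate_migration) {
                    return true;       /* caller explicitly asked for the record */
            }
            /* defer migration until the caller has made max_lacount consecutive
               calls; max_lacount == 0 disables this count-based hand-off */
            return max_lacount != 0 &&
                   la->laccessor == caller &&
                   la->lacount >= max_lacount;
    }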


@@ -71,7 +71,7 @@ struct ctdb_req_header *_ctdbd_allocate_pkt(struct ctdb_context *ctdb,
  */
 int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
                     struct ctdb_ltdb_header *header, TALLOC_CTX *mem_ctx,
-                    TDB_DATA *data, bool updatetdb)
+                    TDB_DATA *data, bool updatetdb, uint32_t caller)
 {
         struct ctdb_call_info *c;
         struct ctdb_registered_call *fn;
@@ -106,7 +106,15 @@ int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
         }
-        /* we need to force the record to be written out if this was a remote access */
-        if (c->new_data == NULL) {
+        if (header->laccessor != caller) {
+                header->lacount = 0;
+        }
+        header->laccessor = caller;
+        header->lacount++;
+
+        /* we need to force the record to be written out if this was a remote access,
+           so that the lacount is updated */
+        if (c->new_data == NULL && header->laccessor != ctdb->pnn) {
                 c->new_data = &c->record_data;
         }
@@ -386,7 +394,7 @@ static struct ctdb_client_call_state *ctdb_client_call_local_send(struct ctdb_db
         *(state->call) = *call;
         state->ctdb_db = ctdb_db;
-        ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true);
+        ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true, ctdb->pnn);
         if (ret != 0) {
                 DEBUG(DEBUG_DEBUG,("ctdb_call_local() failed, ignoring return code %d\n", ret));
         }


@@ -67,6 +67,7 @@ static void ltdb_initial_header(struct ctdb_db_context *ctdb_db,
         /* initial dmaster is the lmaster */
         header->dmaster = ctdb_lmaster(ctdb_db->ctdb, &key);
         header->flags = CTDB_REC_FLAG_AUTOMATIC;
+        header->laccessor = header->dmaster;
 }


@@ -126,6 +126,7 @@ struct ctdb_tunable {
         uint32_t recover_pdb_by_seqnum;
         uint32_t deferred_rebalance_on_node_add;
         uint32_t fetch_collapse;
+        uint32_t max_lacount;
 };
 /*
@@ -798,7 +799,7 @@ struct ctdb_call_state *ctdb_daemon_call_send_remote(struct ctdb_db_context *ctd
 int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
                     struct ctdb_ltdb_header *header, TALLOC_CTX *mem_ctx,
-                    TDB_DATA *data, bool updatetdb);
+                    TDB_DATA *data, bool updatetdb, uint32_t caller);
 #define ctdb_reqid_find(ctdb, reqid, type) (type *)_ctdb_reqid_find(ctdb, reqid, #type, __location__)


@@ -501,7 +501,8 @@ enum ctdb_trans2_commit_error {
 struct ctdb_ltdb_header {
         uint64_t rsn;
         uint32_t dmaster;
-        uint32_t reserved1;
+        uint16_t laccessor;
+        uint16_t lacount;
 #define CTDB_REC_FLAG_DEFAULT                   0x00000000
 #define CTDB_REC_FLAG_MIGRATED_WITH_DATA        0x00010000
 #define CTDB_REC_FLAG_VACUUM_MIGRATED           0x00020000
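A side note on the header change above: splitting the old reserved1 field into two uint16_t fields keeps struct ctdb_ltdb_header the same size, so the stored header layout does not grow. A quick stand-alone check of that claim, assuming the remaining fields are the uint64_t rsn, uint32_t dmaster and a uint32_t flags member as in ctdb_protocol.h of this vintage (the struct names here are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    struct hdr_before {        /* layout prior to this commit */
            uint64_t rsn;
            uint32_t dmaster;
            uint32_t reserved1;
            uint32_t flags;
    };

    struct hdr_after {         /* layout after this commit */
            uint64_t rsn;
            uint32_t dmaster;
            uint16_t laccessor;
            uint16_t lacount;
            uint32_t flags;
    };

    int main(void)
    {
            /* the two 16-bit fields occupy exactly the space of reserved1 */
            assert(sizeof(struct hdr_before) == sizeof(struct hdr_after));
            return 0;
    }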


@@ -338,7 +338,7 @@ static void ctdb_become_dmaster(struct ctdb_db_context *ctdb_db,
                 return;
         }
-        ctdb_call_local(ctdb_db, state->call, &header, state, &data, true);
+        ctdb_call_local(ctdb_db, state->call, &header, state, &data, true, ctdb->pnn);
         ret = ctdb_ltdb_unlock(ctdb_db, state->call->key);
         if (ret != 0) {
@@ -643,11 +643,15 @@ void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
         CTDB_INCREMENT_DB_STAT(ctdb_db, hop_count_bucket[bucket]);
-        /* Try if possible to migrate the record off to the caller node.
-         * From the clients perspective a fetch of the data is just as
-         * expensive as a migration.
-         */
-        if (c->hdr.srcnode != ctdb->pnn) {
+        /* if this nodes has done enough consecutive calls on the same record
+           then give them the record
+           or if the node requested an immediate migration
+        */
+        if ( c->hdr.srcnode != ctdb->pnn &&
+             ((header.laccessor == c->hdr.srcnode
+               && header.lacount >= ctdb->tunable.max_lacount
+               && ctdb->tunable.max_lacount != 0)
+              || (c->flags & CTDB_IMMEDIATE_MIGRATION)) ) {
                 if (ctdb_db->transaction_active) {
                         DEBUG(DEBUG_INFO, (__location__ " refusing migration"
                               " of key %s while transaction is active\n",
@@ -666,7 +670,7 @@ void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
                 }
         }
-        ret = ctdb_call_local(ctdb_db, call, &header, hdr, &data, true);
+        ret = ctdb_call_local(ctdb_db, call, &header, hdr, &data, true, c->hdr.srcnode);
         if (ret != 0) {
                 DEBUG(DEBUG_ERR,(__location__ " ctdb_call_local failed\n"));
                 call->status = -1;
@@ -953,7 +957,7 @@ struct ctdb_call_state *ctdb_call_local_send(struct ctdb_db_context *ctdb_db,
         *(state->call) = *call;
         state->ctdb_db = ctdb_db;
-        ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true);
+        ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true, ctdb->pnn);
         if (ret != 0) {
                 DEBUG(DEBUG_DEBUG,("ctdb_call_local() failed, ignoring return code %d\n", ret));
         }


@@ -74,7 +74,8 @@ static const struct {
         { "AllowClientDBAttach", 1, offsetof(struct ctdb_tunable, allow_client_db_attach), false },
         { "RecoverPDBBySeqNum", 0, offsetof(struct ctdb_tunable, recover_pdb_by_seqnum), false },
         { "DeferredRebalanceOnNodeAdd", 300, offsetof(struct ctdb_tunable, deferred_rebalance_on_node_add) },
-        { "FetchCollapse", 1, offsetof(struct ctdb_tunable, fetch_collapse) }
+        { "FetchCollapse", 1, offsetof(struct ctdb_tunable, fetch_collapse) },
+        { "MaxLACount", 20, offsetof(struct ctdb_tunable, max_lacount) }
 };
 /*
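The MaxLACount entry above registers the new tunable with a default of 20, matching the commit message. Like other ctdb tunables it can be read or changed at runtime, for example with `ctdb getvar MaxLACount` and `ctdb setvar MaxLACount <n>`; per the condition added in ctdb_request_call, setting it to 0 disables the count-based hand-off, so only calls flagged CTDB_IMMEDIATE_MIGRATION move the record to the caller.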