
Revert "LACOUNT: Add back lacount mechanism to defer migrating a fetched/read copy until after default of 20 consecutive requests from the same node"

This reverts commit 035c0d981bde8c0eee8b3f24ba8e2dc817e5b504.

This is a premature optimization.  A record can bounce between nodes
very quickly if it is contended.  There is no need to hold a record on
a node unnecessarily.  If record contention becomes a problem, enabling
sticky records on the database is a better approach.
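
For context, the sticky-record mechanism mentioned above is controlled by the
HopcountMakeSticky, StickyDuration and StickyPindown tunables that remain in
the tunable table in this diff.  A minimal sketch of the idea, using
illustrative names rather than the actual ctdbd code and assuming a
per-database sticky flag:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch only, not the actual ctdbd code: decide whether a
 * contended record should be made "sticky" on the current node.  A request's
 * hopcount grows each time it is forwarded to another node, so a large
 * hopcount indicates a record that is bouncing around the cluster. */
static bool record_should_become_sticky(uint32_t hopcount,
					bool db_sticky_enabled,
					uint32_t hopcount_make_sticky)
{
	if (!db_sticky_enabled || hopcount_make_sticky == 0) {
		return false;	/* stickiness disabled for this database */
	}
	return hopcount >= hopcount_make_sticky;
}

Once sticky, a record stays pinned to its current node for roughly
StickyDuration seconds (with StickyPindown briefly deferring further
migration requests), rather than migrating on every remote access, which is
the behaviour the lacount counter tried to moderate.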

Conflicts:
	include/ctdb_private.h
	server/ctdb_tunables.c

Signed-off-by: Amitay Isaacs <amitay@gmail.com>

(This used to be ctdb commit ac417b0003f0116f116834ad2ac51482d25cfa0d)
Amitay Isaacs 2013-08-19 15:04:46 +10:00
parent 59dae19f5a
commit 1467b666f2
6 changed files with 13 additions and 29 deletions


@@ -70,7 +70,7 @@ struct ctdb_req_header *_ctdbd_allocate_pkt(struct ctdb_context *ctdb,
  */
 int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
 		    struct ctdb_ltdb_header *header, TALLOC_CTX *mem_ctx,
-		    TDB_DATA *data, bool updatetdb, uint32_t caller)
+		    TDB_DATA *data, bool updatetdb)
 {
 	struct ctdb_call_info *c;
 	struct ctdb_registered_call *fn;
@@ -105,15 +105,7 @@ int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
 	}
-	/* we need to force the record to be written out if this was a remote access */
-	if (header->laccessor != caller) {
-		header->lacount = 0;
-	}
-	header->laccessor = caller;
-	header->lacount++;
-	/* we need to force the record to be written out if this was a remote access,
-	   so that the lacount is updated */
-	if (c->new_data == NULL && header->laccessor != ctdb->pnn) {
+	if (c->new_data == NULL) {
 		c->new_data = &c->record_data;
 	}
@@ -368,7 +360,7 @@ static struct ctdb_client_call_state *ctdb_client_call_local_send(struct ctdb_db
 	*(state->call) = *call;
 	state->ctdb_db = ctdb_db;
-	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true, ctdb->pnn);
+	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true);
 	if (ret != 0) {
 		DEBUG(DEBUG_DEBUG,("ctdb_call_local() failed, ignoring return code %d\n", ret));
 	}


@@ -66,7 +66,6 @@ static void ltdb_initial_header(struct ctdb_db_context *ctdb_db,
 	/* initial dmaster is the lmaster */
 	header->dmaster = ctdb_lmaster(ctdb_db->ctdb, &key);
 	header->flags = CTDB_REC_FLAG_AUTOMATIC;
-	header->laccessor = header->dmaster;
 }


@@ -126,7 +126,6 @@ struct ctdb_tunable {
 	uint32_t recover_pdb_by_seqnum;
 	uint32_t deferred_rebalance_on_node_add;
 	uint32_t fetch_collapse;
-	uint32_t max_lacount;
 	uint32_t hopcount_make_sticky;
 	uint32_t sticky_duration;
 	uint32_t sticky_pindown;
@@ -872,7 +871,7 @@ struct ctdb_call_state *ctdb_daemon_call_send_remote(struct ctdb_db_context *ctd
 int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
 		    struct ctdb_ltdb_header *header, TALLOC_CTX *mem_ctx,
-		    TDB_DATA *data, bool updatetdb, uint32_t caller);
+		    TDB_DATA *data, bool updatetdb);
 #define ctdb_reqid_find(ctdb, reqid, type) (type *)_ctdb_reqid_find(ctdb, reqid, #type, __location__)


@@ -524,8 +524,7 @@ enum ctdb_trans2_commit_error {
 struct ctdb_ltdb_header {
 	uint64_t rsn;
 	uint32_t dmaster;
-	uint16_t laccessor;
-	uint16_t lacount;
+	uint32_t reserved1;
 #define CTDB_REC_FLAG_DEFAULT 0x00000000
 #define CTDB_REC_FLAG_MIGRATED_WITH_DATA 0x00010000
 #define CTDB_REC_FLAG_VACUUM_MIGRATED 0x00020000


@@ -399,7 +399,7 @@ static void ctdb_become_dmaster(struct ctdb_db_context *ctdb_db,
 		return;
 	}
-	ctdb_call_local(ctdb_db, state->call, &header, state, &data, true, ctdb->pnn);
+	ctdb_call_local(ctdb_db, state->call, &header, state, &data, true);
 	ret = ctdb_ltdb_unlock(ctdb_db, state->call->key);
 	if (ret != 0) {
@@ -930,15 +930,11 @@ void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
 	}
-	/* if this nodes has done enough consecutive calls on the same record
-	   then give them the record
-	   or if the node requested an immediate migration
-	*/
-	if ( c->hdr.srcnode != ctdb->pnn &&
-	     ((header.laccessor == c->hdr.srcnode
-	       && header.lacount >= ctdb->tunable.max_lacount
-	       && ctdb->tunable.max_lacount != 0)
-	      || (c->flags & CTDB_IMMEDIATE_MIGRATION)) ) {
+	/* Try if possible to migrate the record off to the caller node.
+	 * From the clients perspective a fetch of the data is just as
+	 * expensive as a migration.
+	 */
+	if (c->hdr.srcnode != ctdb->pnn) {
 		if (ctdb_db->transaction_active) {
 			DEBUG(DEBUG_INFO, (__location__ " refusing migration"
 				" of key %s while transaction is active\n",
@@ -957,7 +953,7 @@ void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
 		}
 	}
-	ret = ctdb_call_local(ctdb_db, call, &header, hdr, &data, true, c->hdr.srcnode);
+	ret = ctdb_call_local(ctdb_db, call, &header, hdr, &data, true);
 	if (ret != 0) {
 		DEBUG(DEBUG_ERR,(__location__ " ctdb_call_local failed\n"));
 		call->status = -1;
@@ -1244,7 +1240,7 @@ struct ctdb_call_state *ctdb_call_local_send(struct ctdb_db_context *ctdb_db,
 	*(state->call) = *call;
 	state->ctdb_db = ctdb_db;
-	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true, ctdb->pnn);
+	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true);
 	if (ret != 0) {
 		DEBUG(DEBUG_DEBUG,("ctdb_call_local() failed, ignoring return code %d\n", ret));
 	}


@@ -75,7 +75,6 @@ static const struct {
 	{ "RecoverPDBBySeqNum", 0, offsetof(struct ctdb_tunable, recover_pdb_by_seqnum), false },
 	{ "DeferredRebalanceOnNodeAdd", 300, offsetof(struct ctdb_tunable, deferred_rebalance_on_node_add) },
 	{ "FetchCollapse", 1, offsetof(struct ctdb_tunable, fetch_collapse) },
-	{ "MaxLACount", 20, offsetof(struct ctdb_tunable, max_lacount) },
 	{ "HopcountMakeSticky", 50, offsetof(struct ctdb_tunable, hopcount_make_sticky) },
 	{ "StickyDuration", 600, offsetof(struct ctdb_tunable, sticky_duration) },
 	{ "StickyPindown", 200, offsetof(struct ctdb_tunable, sticky_pindown) },