1
0
mirror of https://github.com/samba-team/samba.git synced 2025-01-10 01:18:15 +03:00

vacuum: Avoid some tallocs in ctdb recovery

In a heavily loaded and volatile database a lot of SCHEDULE_FOR_DELETION
requests can come in between fast vacuuming runs. This can lead to
significant ctdb CPU load due to the cost of doing talloc_free. This
reduces the number of objects a bit by coalescing the two objects
of delete_record_data into one. It will also avoid having to allocate
another talloc header for a SCHEDULE_FOR_DELETION key. Not the full fix
for this problem, but it might contribute a bit.

(This used to be ctdb commit 9a02f61547ddf74629aca21639d8fb61c1df7cbb)
This commit is contained in:
Volker Lendecke 2012-11-22 15:27:51 +01:00 committed by Amitay Isaacs
parent b2e96641eb
commit 83862c5c8d

View File

@ -91,6 +91,7 @@ struct delete_record_data {
struct ctdb_db_context *ctdb_db;
struct ctdb_ltdb_header hdr;
TDB_DATA key;
uint8_t keydata[1];
};
struct delete_records_list {
@ -108,21 +109,22 @@ static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
{
struct delete_record_data *dd;
uint32_t hash;
size_t len;
dd = talloc_zero(tree, struct delete_record_data);
len = offsetof(struct delete_record_data, keydata) + key.dsize;
dd = (struct delete_record_data *)talloc_size(tree, len);
if (dd == NULL) {
DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
return -1;
}
talloc_set_name_const(dd, "struct delete_record_data");
dd->ctdb = ctdb;
dd->ctdb_db = ctdb_db;
dd->key.dsize = key.dsize;
dd->key.dptr = talloc_memdup(dd, key.dptr, key.dsize);
if (dd->key.dptr == NULL) {
DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
return -1;
}
dd->key.dptr = dd->keydata;
memcpy(dd->keydata, key.dptr, key.dsize);
dd->hdr = *hdr;