BUG/MEDIUM: pattern: make the pattern LRU cache thread-local and lockless

As reported in issue #335, a lot of contention happens on the PATLRU lock
when performing expensive regex lookups. This is absurd since the whole
purpose of the LRU cache is to make expression lookups fast, so the cache
must not be shared between threads and must remain lockless.

This commit makes the LRU cache thread-local and gets rid of the PATLRU
lock. In a test with 7 threads on 4 cores, the lookup rate climbed from
67k to 369k hits per second, a scalability factor of 5.5.
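
In outline, the change is the classic per-thread-cache trade: duplicate the
cache instead of sharing it, and all locking disappears from the hot path.
Below is a minimal, self-contained C sketch of the technique; the names are
hypothetical stand-ins (the real cache is the lru64 tree visible in the diff),
but the storage class is the whole trick:

    #include <stdlib.h>

    /* Stand-in for the real lru64 tree: a tiny direct-mapped cache.
     * _Thread_local (C11) gives every thread its own private copy, so
     * neither lookups nor insertions ever need a lock. */
    struct slot { unsigned long long key; void *data; };

    static _Thread_local struct slot *tl_tab;
    static _Thread_local size_t tl_size;

    static int tl_cache_alloc(size_t entries)      /* run at thread startup */
    {
            tl_tab = calloc(entries, sizeof(*tl_tab));
            tl_size = entries;
            return tl_tab != NULL;
    }

    static void *tl_cache_get(unsigned long long key)
    {
            struct slot *s = &tl_tab[key % tl_size];
            return (s->key == key) ? s->data : NULL;
    }

    static void tl_cache_put(unsigned long long key, void *data)
    {
            struct slot *s = &tl_tab[key % tl_size];
            s->key = key;
            s->data = data;
    }

The cost is memory rather than contention: N threads now hold N caches, which
is why the documentation hunk below restates the footprint per process/thread.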

Given the huge performance difference and the regression experienced by
users migrating from the process model to threads, this should be
backported at least to 2.0.

Thanks to Brian Diekelman for his detailed report about this regression.
Author: Willy Tarreau
Date:   2019-10-23 06:59:31 +02:00
Commit: 403bfbb130 (parent 28c63c15f5)
3 changed files with 31 additions and 62 deletions

diff --git a/doc/configuration.txt b/doc/configuration.txt

@@ -1845,7 +1845,8 @@ tune.pattern.cache-size <number>
 configuration line (including all those loaded from files). It automatically
 invalidates entries which are updated using HTTP actions or on the CLI. The
 default cache size is set to 10000 entries, which limits its footprint to
-about 5 MB on 32-bit systems and 8 MB on 64-bit systems. There is a very low
+about 5 MB per process/thread on 32-bit systems and 8 MB per process/thread
+on 64-bit systems, as caches are thread/process local. There is a very low
 risk of collision in this cache, which is in the order of the size of the
 cache divided by 2^64. Typically, at 10000 requests per second with the
 default cache size of 10000 entries, there's 1% chance that a brute force
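
As a back-of-envelope check of the figures above (assuming every lookup
collides independently with probability size/2^64, which is the approximation
the text itself uses):

    #include <stdio.h>

    int main(void)
    {
            double entries = 10000.0, rate = 10000.0;       /* default size; req/s */
            double p = entries / 18446744073709551616.0;    /* size / 2^64 ~ 5.4e-16 */
            double n = 0.01 / p;                            /* requests for a 1% chance */

            printf("per-lookup collision probability: %.3g\n", p);
            printf("requests until 1%% cumulative chance: %.3g\n", n);
            printf("at %.0f req/s that takes about %.0f years\n",
                   rate, n / rate / 31557600.0);            /* 31557600 s per year */
            return 0;
    }

This lands around 58 years of sustained traffic for a 1% chance of a single
collision, consistent with the "very low risk" claim; the stated footprints
also imply roughly 500 bytes per entry on 32-bit systems and 800 bytes on
64-bit ones.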

diff --git a/include/common/hathreads.h b/include/common/hathreads.h

@@ -548,7 +548,6 @@ enum lock_label {
 SSL_GEN_CERTS_LOCK,
 PATREF_LOCK,
 PATEXP_LOCK,
-PATLRU_LOCK,
 VARS_LOCK,
 COMP_POOL_LOCK,
 LUA_LOCK,
@@ -668,7 +667,6 @@ static inline const char *lock_label(enum lock_label label)
 case SSL_GEN_CERTS_LOCK: return "SSL_GEN_CERTS";
 case PATREF_LOCK: return "PATREF";
 case PATEXP_LOCK: return "PATEXP";
-case PATLRU_LOCK: return "PATLRU";
 case VARS_LOCK: return "VARS";
 case COMP_POOL_LOCK: return "COMP_POOL";
 case LUA_LOCK: return "LUA";

diff --git a/src/pattern.c b/src/pattern.c

@@ -154,8 +154,7 @@ static THREAD_LOCAL struct sample_data static_sample_data;
 /* This is the root of the list of all pattern_ref avalaibles. */
 struct list pattern_reference = LIST_HEAD_INIT(pattern_reference);
 
-static struct lru64_head *pat_lru_tree;
-__decl_spinlock(pat_lru_tree_lock);
+static THREAD_LOCAL struct lru64_head *pat_lru_tree;
 static unsigned long long pat_lru_seed;
 
 /*
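
For context, THREAD_LOCAL is HAProxy's thread-local-storage wrapper. The
usual shape of such a macro (an assumption for illustration, not quoted
from this tree) is:

    #ifdef USE_THREAD
    #define THREAD_LOCAL __thread   /* one copy of the variable per thread */
    #else
    #define THREAD_LOCAL            /* threads disabled: a plain global */
    #endif

Either way the declaration above compiles unchanged, so single-threaded
builds keep working with no extra code path.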
@@ -487,15 +486,10 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int
 if (pat_lru_tree) {
 unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
 pat_lru_tree, expr, expr->revision);
-if (!lru) {
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
-else if (lru->domain) {
+if (lru && lru->domain) {
 ret = lru->data;
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 return ret;
 }
 }
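
Why dropping the lock is safe: the tree now belongs to exactly one thread,
and stale entries are rejected by the revision check rather than by mutual
exclusion. A condensed view of the resulting fast path, with the lru64
contract as it can be inferred from this diff (the comments are
interpretation, not quoted documentation):

    /* lru64_get() finds or reserves the node for (hash, expr, revision);
     * ->domain is only set once a verdict was committed for that exact
     * revision, so a pattern update (which bumps expr->revision) silently
     * invalidates old entries with no cross-thread signaling at all. */
    lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
                    pat_lru_tree, expr, expr->revision);
    if (lru && lru->domain) {
            ret = lru->data;        /* fresh cache hit */
            return ret;
    }
    /* ... otherwise evaluate the pattern normally, then publish: */
    if (lru)
            lru64_commit(lru, ret, expr, expr->revision, NULL);

The same simplification is applied verbatim to pat_match_bin(),
pat_match_reg(), pat_match_beg(), pat_match_end() and pat_match_sub() in
the hunks that follow.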
@@ -515,10 +509,8 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int
 }
 }
 
-if (lru) {
+if (lru)
 lru64_commit(lru, ret, expr, expr->revision, NULL);
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
 
 return ret;
 }
@@ -534,15 +526,10 @@ struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int
 if (pat_lru_tree) {
 unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
 pat_lru_tree, expr, expr->revision);
-if (!lru) {
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
-else if (lru->domain) {
+if (lru && lru->domain) {
 ret = lru->data;
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 return ret;
 }
 }
@@ -559,10 +546,8 @@ struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int
 }
 }
 
-if (lru) {
+if (lru)
 lru64_commit(lru, ret, expr, expr->revision, NULL);
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
 
 return ret;
 }
@@ -604,15 +589,10 @@ struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int
 if (pat_lru_tree) {
 unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
 pat_lru_tree, expr, expr->revision);
-if (!lru) {
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
-else if (lru->domain) {
+if (lru && lru->domain) {
 ret = lru->data;
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 return ret;
 }
 }
@@ -626,10 +606,8 @@ struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int
 }
 }
 
-if (lru) {
+if (lru)
 lru64_commit(lru, ret, expr, expr->revision, NULL);
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
 
 return ret;
 }
@@ -674,15 +652,10 @@ struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int
 if (pat_lru_tree) {
 unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
 pat_lru_tree, expr, expr->revision);
-if (!lru) {
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
-else if (lru->domain) {
+if (lru && lru->domain) {
 ret = lru->data;
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 return ret;
 }
 }
@@ -702,10 +675,8 @@ struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int
 break;
 }
 
-if (lru) {
+if (lru)
 lru64_commit(lru, ret, expr, expr->revision, NULL);
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
 
 return ret;
 }
@@ -722,15 +693,10 @@ struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int
 if (pat_lru_tree) {
 unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
 pat_lru_tree, expr, expr->revision);
-if (!lru) {
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
-else if (lru->domain) {
+if (lru && lru->domain) {
 ret = lru->data;
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 return ret;
 }
 }
@@ -750,10 +716,8 @@ struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int
 break;
 }
 
-if (lru) {
+if (lru)
 lru64_commit(lru, ret, expr, expr->revision, NULL);
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
 
 return ret;
 }
@@ -774,15 +738,10 @@ struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int
 if (pat_lru_tree) {
 unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 lru = lru64_get(XXH64(smp->data.u.str.area, smp->data.u.str.data, seed),
 pat_lru_tree, expr, expr->revision);
-if (!lru) {
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
-else if (lru->domain) {
+if (lru && lru->domain) {
 ret = lru->data;
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 return ret;
 }
 }
@@ -816,10 +775,8 @@ struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int
 }
 }
 leave:
-if (lru) {
+if (lru)
 lru64_commit(lru, ret, expr, expr->revision, NULL);
-HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
-}
 
 return ret;
 }
@@ -2685,9 +2642,6 @@ void pattern_finalize_config(void)
 struct list pr = LIST_HEAD_INIT(pr);
 
 pat_lru_seed = random();
-if (global.tune.pattern_cache) {
-pat_lru_tree = lru64_new(global.tune.pattern_cache);
-}
 
 list_for_each_entry(ref, &pattern_reference, list) {
 if (ref->unique_id == -1) {
@@ -2726,3 +2680,19 @@ void pattern_finalize_config(void)
 LIST_ADD(&pr, &pattern_reference);
 LIST_DEL(&pr);
 }
+
+static int pattern_per_thread_lru_alloc()
+{
+if (!global.tune.pattern_cache)
+return 1;
+pat_lru_tree = lru64_new(global.tune.pattern_cache);
+return !!pat_lru_tree;
+}
+
+static void pattern_per_thread_lru_free()
+{
+lru64_destroy(pat_lru_tree);
+}
+
+REGISTER_PER_THREAD_ALLOC(pattern_per_thread_lru_alloc);
+REGISTER_PER_THREAD_FREE(pattern_per_thread_lru_free);
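
The two registrations close the lifecycle: each worker thread builds its own
tree at startup and destroys it on exit, since a thread-local pointer cannot
be initialized once for everyone from pattern_finalize_config(). Note that
pattern_per_thread_lru_alloc() returns 1 when the cache is disabled, so
startup still succeeds and pat_lru_tree simply stays NULL, making every
pat_match_*() function skip the cache. A hedged sketch of how such per-thread
hooks are typically driven (hypothetical runner, not HAProxy code):

    /* at thread start: run every registered allocator, abort on failure */
    for (i = 0; i < nb_alloc_hooks; i++)
            if (!alloc_hooks[i]())
                    exit(1);

    /* at thread exit: run the matching free hooks in reverse order */
    while (nb_free_hooks--)
            free_hooks[nb_free_hooks]();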