MEDIUM: memory: make the pool cache an array and not a thread_local
Having a thread_local for the pool cache is messy: all elements must be initialized at startup, but that cannot be done until the threads are created, and once they are created it is too late. For this reason, the allocation code used to check for the pool's initialization, and it was the release code that detected the first call and initialized the cache on the fly, which was not exactly optimal. Now that we have initcalls, let's turn this into a per-thread array. This array is initialized very early in the boot process (STG_PREPARE) so that pools are always safe to use. This makes it possible to remove the tests from the alloc/free calls. This change alone removed 2.5 kB of code from the cumulated pool_alloc() and pool_free() paths.
This commit is contained in:
parent
b6b3df3ed3
commit
7f0165e399
@ -149,7 +149,7 @@ static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
|
||||
cached = 0;
|
||||
idx = pool_get_index(pool_head_buffer);
|
||||
if (idx >= 0)
|
||||
cached = pool_cache[idx].count;
|
||||
cached = pool_cache[tid][idx].count;
|
||||
|
||||
*buf = BUF_WANTED;
|
||||
|
||||
|
@ -63,8 +63,7 @@ struct pool_cache_item {
|
||||
struct list by_lru; /* link to objects by LRU order */
|
||||
};
|
||||
|
||||
extern THREAD_LOCAL struct pool_cache_head pool_cache[MAX_BASE_POOLS];
|
||||
extern THREAD_LOCAL struct list pool_lru_head; /* oldest objects */
|
||||
extern struct pool_cache_head pool_cache[][MAX_BASE_POOLS];
|
||||
extern THREAD_LOCAL size_t pool_cache_bytes; /* total cache size */
|
||||
extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */
|
||||
|
||||
@ -183,18 +182,19 @@ static inline void *__pool_get_from_cache(struct pool_head *pool)
|
||||
{
|
||||
ssize_t idx = pool_get_index(pool);
|
||||
struct pool_cache_item *item;
|
||||
struct pool_cache_head *ph;
|
||||
|
||||
/* pool not in cache */
|
||||
if (idx < 0)
|
||||
return NULL;
|
||||
|
||||
/* never allocated or empty */
|
||||
if (pool_cache[idx].list.n == NULL || LIST_ISEMPTY(&pool_cache[idx].list))
|
||||
return NULL;
|
||||
ph = &pool_cache[tid][idx];
|
||||
if (LIST_ISEMPTY(&ph->list))
|
||||
return NULL; // empty
|
||||
|
||||
item = LIST_NEXT(&pool_cache[idx].list, typeof(item), by_pool);
|
||||
pool_cache[idx].count--;
|
||||
pool_cache_bytes -= pool_cache[idx].size;
|
||||
item = LIST_NEXT(&ph->list, typeof(item), by_pool);
|
||||
ph->count--;
|
||||
pool_cache_bytes -= ph->size;
|
||||
pool_cache_count--;
|
||||
LIST_DEL(&item->by_pool);
|
||||
LIST_DEL(&item->by_lru);
|
||||
@ -306,7 +306,7 @@ static inline void pool_put_to_cache(struct pool_head *pool, void *ptr)
|
||||
*/
|
||||
if (idx < 0 ||
|
||||
(pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4 &&
|
||||
pool_cache[idx].count >= 16 + pool_cache_count / 8)) {
|
||||
pool_cache[tid][idx].count >= 16 + pool_cache_count / 8)) {
|
||||
__pool_free(pool, ptr);
|
||||
return;
|
||||
}
|
||||
|
36
src/memory.c
36
src/memory.c
@ -37,8 +37,9 @@
|
||||
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
|
||||
unsigned int pool_base_count = 0;
|
||||
|
||||
THREAD_LOCAL struct pool_cache_head pool_cache[MAX_BASE_POOLS] = { };
|
||||
THREAD_LOCAL struct list pool_lru_head = { }; /* oldest objects */
|
||||
/* These ones are initialized per-thread on startup by init_pools() */
|
||||
struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
|
||||
static struct list pool_lru_head[MAX_THREADS]; /* oldest objects */
|
||||
THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
|
||||
THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects */
|
||||
|
||||
@ -259,18 +260,10 @@ void pool_gc(struct pool_head *pool_ctx)
|
||||
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
|
||||
{
|
||||
struct pool_cache_item *item = (struct pool_cache_item *)ptr;
|
||||
struct pool_cache_head *ph = &pool_cache[idx];
|
||||
|
||||
/* never allocated or empty */
|
||||
if (unlikely(ph->list.n == NULL)) {
|
||||
LIST_INIT(&ph->list);
|
||||
ph->size = pool->size;
|
||||
if (pool_lru_head.n == NULL)
|
||||
LIST_INIT(&pool_lru_head);
|
||||
}
|
||||
struct pool_cache_head *ph = &pool_cache[tid][idx];
|
||||
|
||||
LIST_ADD(&ph->list, &item->by_pool);
|
||||
LIST_ADD(&pool_lru_head, &item->by_lru);
|
||||
LIST_ADD(&pool_lru_head[tid], &item->by_lru);
|
||||
ph->count++;
|
||||
pool_cache_count++;
|
||||
pool_cache_bytes += ph->size;
|
||||
@ -279,7 +272,7 @@ void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
|
||||
return;
|
||||
|
||||
do {
|
||||
item = LIST_PREV(&pool_lru_head, struct pool_cache_item *, by_lru);
|
||||
item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);
|
||||
/* note: by definition we remove oldest objects so they also are the
|
||||
* oldest in their own pools, thus their next is the pool's head.
|
||||
*/
|
||||
@ -289,7 +282,7 @@ void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
|
||||
ph->count--;
|
||||
pool_cache_count--;
|
||||
pool_cache_bytes -= ph->size;
|
||||
__pool_free(pool_base_start + (ph - pool_cache), item);
|
||||
__pool_free(pool_base_start + (ph - pool_cache[tid]), item);
|
||||
} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
|
||||
}
|
||||
|
||||
@ -545,6 +538,21 @@ void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
|
||||
}
|
||||
}
|
||||
|
||||
/* Initializes all per-thread arrays on startup */
|
||||
static void init_pools()
|
||||
{
|
||||
int thr, idx;
|
||||
|
||||
for (thr = 0; thr < MAX_THREADS; thr++) {
|
||||
for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
|
||||
LIST_INIT(&pool_cache[thr][idx].list);
|
||||
pool_cache[thr][idx].size = 0;
|
||||
}
|
||||
LIST_INIT(&pool_lru_head[thr]);
|
||||
}
|
||||
}
|
||||
|
||||
INITCALL0(STG_PREPARE, init_pools);
|
||||
|
||||
/* register cli keywords */
|
||||
static struct cli_kw_list cli_kws = {{ },{
|
||||
|
Loading…
x
Reference in New Issue
Block a user