MEDIUM: pools: remove the locked pools implementation

Now that the modified lockless variant does not need a DWCAS anymore,
there's no reason to keep the much slower locked version, so let's
just get rid of it.
Author: Willy Tarreau
Date:   2021-06-10 17:31:48 +02:00
parent 1526ffe815
commit 8715dec6f9
5 changed files with 4 additions and 110 deletions
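As background for the commit message above: the locked variant only ever existed because the previous lockless design needed a double-word CAS, which not every platform provides. A free list can however be made lock-free with nothing wider than a pointer, for instance by only ever detaching the whole list at once. The sketch below is purely illustrative, written with C11 atomics and made-up names (struct item, lifo_push(), lifo_take_all()); it is not the actual HAProxy implementation, which layers POOL_LINK(), the per-thread caches and the counters visible in the hunks below on top of this kind of primitive.

#include <stdatomic.h>
#include <stddef.h>

/* Illustrative only: a minimal lock-free LIFO needing no more than a
 * pointer-sized CAS. Producers push one item at a time; the consumer
 * detaches the whole list with an atomic exchange, which avoids the ABA
 * problem that a per-item pop would otherwise have to solve.
 */
struct item {
	struct item *next;
};

static _Atomic(struct item *) free_head;

static void lifo_push(struct item *it)
{
	struct item *old = atomic_load_explicit(&free_head, memory_order_relaxed);

	do {
		it->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&free_head, &old, it,
	                                                memory_order_release,
	                                                memory_order_relaxed));
}

/* detaches the whole list; the caller then walks it privately */
static struct item *lifo_take_all(void)
{
	return atomic_exchange_explicit(&free_head, NULL, memory_order_acquire);
}

A consumer that only ever takes the full list never dereferences a shared head pointer before swapping it, so no {pointer, counter} pair, and hence no DWCAS, is required; once such a cheaper scheme exists, a spinlocked fallback no longer pays for itself.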

View File

@@ -230,7 +230,7 @@ SMALL_OPTS =
# passed as-is to CFLAGS). Please check sources for their exact meaning or do
# not use them at all. Some even more obscure ones might also be available
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_NO_LOCKLESS_POOLS, DEBUG_FD,
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK.

View File

@@ -33,13 +33,6 @@
#define CONFIG_HAP_POOLS
#endif
/* On architectures supporting threads and double-word CAS, we can implement
 * lock-less memory pools. This isn't supported for debugging modes however.
 */
#if defined(USE_THREAD) && defined(HA_HAVE_CAS_DW) && defined(CONFIG_HAP_POOLS) && !defined(DEBUG_NO_LOCKLESS_POOLS)
#define CONFIG_HAP_LOCKLESS_POOLS
#endif
/* On modern architectures with many threads, a fast memory allocator, and
 * local pools, the global pools with their single list can be way slower than
 * the standard allocator which already has its own per-thread arenas. In this
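The comment and #if deleted earlier in this hunk are the last trace of why a locked fallback was needed at all: a lock-free per-item pop has to defend against ABA (the head may be popped, recycled and pushed back between the read and the CAS), and the classic defence is to pair the pointer with a serial number updated as one unit, which takes a double-word CAS. The following is a generic illustration of that older style, again with invented names and plain C11 atomics rather than HAProxy's HA_ATOMIC_* macros; on targets without a native double-word CAS a compiler would fall back to a lock, which is the case the removed locked implementation handled explicitly.

#include <stdatomic.h>
#include <stdint.h>

struct item {
	struct item *next;
};

/* {head, serial} are compared and swapped as one unit: every successful
 * pop bumps the serial, so a head pointer that reappears after being
 * recycled (the ABA case) still fails the comparison. This is what used
 * to require a double-word CAS.
 */
struct tagged_head {
	struct item *head;
	uintptr_t serial;
};

static _Atomic struct tagged_head free_head;

static struct item *pop_dwcas(void)
{
	struct tagged_head old, new;

	old = atomic_load(&free_head);
	do {
		if (!old.head)
			return NULL;
		/* note: assumes the node stays readable while listed */
		new.head = old.head->next;
		new.serial = old.serial + 1;
	} while (!atomic_compare_exchange_weak(&free_head, &old, new));
	return old.head;
}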
@@ -93,7 +86,6 @@ struct pool_cache_item {
struct pool_head {
	void **free_list;
	__decl_thread(HA_SPINLOCK_T lock); /* the spin lock */
	unsigned int used; /* how many chunks are currently in use */
	unsigned int needed_avg;/* floating indicator between used and allocated */
	unsigned int allocated; /* how many chunks have been allocated */
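The needed_avg field kept above is what the shared-cache code feeds through swrate_add() in the hunks further down: a sliding average of "used" that the release heuristic (pool_is_crowded(), visible in the put path below) can weigh against the allocation count to decide whether a freed object is worth keeping. Conceptually it is an exponentially weighted moving average; the helper below is only a rough stand-in for the real swrate_add(), which works on a pre-scaled integer and updates it atomically.

/* Illustrative only, not HAProxy's swrate_add(): each new sample pulls the
 * average 1/samples of the way towards the measured value, so older
 * samples decay geometrically and the result floats between the recent
 * minimum and maximum of the measurement.
 */
static unsigned int ewma_add(unsigned int avg, unsigned int samples,
                             unsigned int value)
{
	return avg - avg / samples + value / samples;
}

POOL_AVG_SAMPLES plays the role of "samples" here and controls how quickly old measurements fade.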

View File

@@ -101,9 +101,7 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
	pool_free_nocache(pool, ptr);
}
#elif defined(CONFIG_HAP_LOCKLESS_POOLS)
/****************** Lockless pools implementation ******************/
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
/*
 * Returns a pointer to type <type> taken from the pool <pool_type> if
@@ -174,59 +172,7 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}
#else /* CONFIG_HAP_LOCKLESS_POOLS */
/****************** Locked pools implementation ******************/
/*
 * Returns a pointer to type <type> taken from the pool <pool_type> if
 * available, otherwise returns NULL. No malloc() is attempted, and poisonning
 * is never performed. The purpose is to get the fastest possible allocation.
 * This version takes the pool's lock in order to do this.
 */
static inline void *pool_get_from_shared_cache(struct pool_head *pool)
{
	void *p;
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	if ((p = pool->free_list) != NULL)
		pool->free_list = *POOL_LINK(pool, p);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	if (p)
		_HA_ATOMIC_INC(&pool->used);
#ifdef DEBUG_MEMORY_POOLS
	if (p) {
		/* keep track of where the element was allocated from */
		*POOL_LINK(pool, p) = (void *)pool;
	}
#endif
	return p;
}
/* unconditionally stores the object as-is into the global pool. The object
 * must not be NULL. Use pool_free() instead.
 */
static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
{
	_HA_ATOMIC_DEC(&pool->used);
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	if (!pool_is_crowded(pool)) {
		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = (void *)ptr;
		ptr = NULL;
	}
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	if (ptr) {
		/* still not freed */
		pool_put_to_os(pool, ptr);
	}
	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}
#endif /* CONFIG_HAP_LOCKLESS_POOLS */
#endif /* CONFIG_HAP_NO_GLOBAL_POOLS */
/* These are generic cache-aware wrappers that allocate/free from/to the local
 * cache first, then from the second level if it exists.

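The truncated comment above refers to the cache-aware wrappers that sit on top of pool_get_from_shared_cache()/pool_put_to_shared_cache(). Their ordering is the point of the whole two-level design, so here is a hedged sketch of the control flow only: local_cache_get()/local_cache_put() are hypothetical stand-ins for the per-thread cache code, and the real pool_alloc()/pool_free() additionally deal with poisoning, DEBUG_MEMORY_POOLS tagging and the various no-cache build options.

/* hypothetical helpers standing in for the real per-thread cache code */
static void *local_cache_get(struct pool_head *pool);
static int local_cache_put(struct pool_head *pool, void *ptr);

/* Illustrative control flow: allocation tries the thread-local cache,
 * then the shared (global) cache, and finally the OS allocator; freeing
 * walks the same levels in the opposite direction.
 */
static inline void *pool_alloc_sketch(struct pool_head *pool)
{
	void *p;

	if ((p = local_cache_get(pool)) != NULL)             /* no atomics needed */
		return p;
	if ((p = pool_get_from_shared_cache(pool)) != NULL)  /* atomic ops */
		return p;
	return pool_alloc_nocache(pool);                     /* falls back to malloc() */
}

static inline void pool_free_sketch(struct pool_head *pool, void *ptr)
{
	if (local_cache_put(pool, ptr))      /* kept thread-local if there is room */
		return;
	pool_put_to_shared_cache(pool, ptr); /* else handed to the shared level */
}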
View File

@@ -371,7 +371,6 @@ int thread_cpu_mask_forced();
enum lock_label {
	TASK_RQ_LOCK,
	TASK_WQ_LOCK,
	POOL_LOCK,
	LISTENER_LOCK,
	PROXY_LOCK,
	SERVER_LOCK,
@@ -423,7 +422,6 @@ static inline const char *lock_label(enum lock_label label)
	switch (label) {
	case TASK_RQ_LOCK: return "TASK_RQ";
	case TASK_WQ_LOCK: return "TASK_WQ";
	case POOL_LOCK: return "POOL";
	case LISTENER_LOCK: return "LISTENER";
	case PROXY_LOCK: return "PROXY";
	case SERVER_LOCK: return "SERVER";

View File

@@ -116,7 +116,6 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
			LIST_INIT(&pool->cache[thr].list);
		}
#endif
		HA_SPIN_INIT(&pool->lock);
	}
	pool->users++;
	return pool;
@@ -124,8 +123,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's allocated counter is checked and updated,
 * but no other checks are performed. The pool's lock is not used and is not a
 * problem either.
 * but no other checks are performed.
 */
void *pool_get_from_os(struct pool_head *pool)
{
@@ -162,8 +160,6 @@ void pool_put_to_os(struct pool_head *pool, void *ptr)
/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's counters are updated but the object is
 * never cached, so this is usable with and without local or shared caches.
 * This may be called with or without the pool lock held, so it must not use
 * the pool's lock.
 */
void *pool_alloc_nocache(struct pool_head *pool)
{
@@ -286,8 +282,6 @@ void pool_gc(struct pool_head *pool_ctx)
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
#if defined(CONFIG_HAP_LOCKLESS_POOLS)
/*
 * This function frees whatever can be freed in pool <pool>.
 */
@@ -321,33 +315,6 @@ void pool_flush(struct pool_head *pool)
	/* here, we should have pool->allocated == pool->used */
}
#else /* CONFIG_HAP_LOCKLESS_POOLS */
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp, **next;
	if (!pool)
		return;
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	next = pool->free_list;
	pool->free_list = NULL;
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		pool_put_to_os(pool, temp);
	}
	/* here, we should have pool->allocated == pool->used */
}
#endif /* CONFIG_HAP_LOCKLESS_POOLS */
/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
@@ -414,9 +381,6 @@ void *pool_destroy(struct pool_head *pool)
		pool->users--;
		if (!pool->users) {
			LIST_DELETE(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif
			/* note that if used == 0, the cache is empty */
			free(pool);
		}
@@ -443,9 +407,6 @@ void dump_pools_to_trash()
	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, " - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used,
@@ -456,9 +417,6 @@ void dump_pools_to_trash()
		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);
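For reference, the format strings above render one line per pool followed by a grand total. An invented example of what such a dump looks like; the pool names and every number below are made up for illustration only:

Dumping pools usage. Use SIGQUIT to flush them.
 - Pool buffer (16384 bytes) : 512 allocated (8388608 bytes), 384 used, needed_avg 410, 0 failures, 2 users, @0x55f3a2c41e00
 - Pool connection (512 bytes) : 1024 allocated (524288 bytes), 900 used, needed_avg 912, 0 failures, 5 users, @0x55f3a2c42a00
Total: 2 pools, 8912896 bytes allocated, 6752256 used.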