MINOR: pools: split pool_free() in the lockfree variant

This separates the validity tests from the code committing the object
to the pool, in order to ease insertion of the thread-local cache.
Willy Tarreau 2018-10-16 08:55:15 +02:00
parent 0a93b6413f
commit 146794dc4f


@@ -215,6 +215,21 @@ static inline void *pool_alloc(struct pool_head *pool)
 	return p;
 }
 
+/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
+ * Both the pool and the pointer must be valid. Use pool_free() for normal
+ * operations.
+ */
+static inline void __pool_free(struct pool_head *pool, void *ptr)
+{
+	void *free_list = pool->free_list;
+
+	do {
+		*POOL_LINK(pool, ptr) = (void *)free_list;
+		__ha_barrier_store();
+	} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr));
+	HA_ATOMIC_SUB(&pool->used, 1);
+}
+
 /*
  * Puts a memory area back to the corresponding pool.
  * Items are chained directly through a pointer that
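
The loop in __pool_free() is the classic lock-free LIFO push: snapshot the current head, write it into the freed object's link word, issue a store barrier so the link is visible before the head moves, then CAS the head from the snapshot to the new object, retrying on contention. For readers unfamiliar with the pattern, here is a minimal standalone sketch of the same push using C11 atomics instead of HAProxy's HA_ATOMIC_* macros; the struct and function names are invented for illustration, and the release ordering on the CAS plays the role of __ha_barrier_store().

#include <stdatomic.h>

/* The first word of a freed object doubles as the "next" link,
 * much like POOL_LINK() does in the pool code. */
struct node {
	struct node *next;
};

struct lifo {
	_Atomic(struct node *) head;
};

/* Lock-free push: retry the CAS until no other thread raced with us.
 * On failure, atomic_compare_exchange_weak_explicit() reloads <head>,
 * so each iteration re-links against the fresh value. */
static void lifo_push(struct lifo *l, struct node *n)
{
	struct node *head = atomic_load_explicit(&l->head, memory_order_relaxed);

	do {
		n->next = head; /* link must be visible before the head moves */
	} while (!atomic_compare_exchange_weak_explicit(&l->head, &head, n,
	                                                memory_order_release,
	                                                memory_order_relaxed));
}

Pushing this way is safe against ABA: a stale snapshot simply makes the CAS fail and the loop retry. It is the pop side of such a stack that needs extra care.
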
@@ -227,19 +242,12 @@ static inline void *pool_alloc(struct pool_head *pool)
 static inline void pool_free(struct pool_head *pool, void *ptr)
 {
 	if (likely(ptr != NULL)) {
-		void *free_list;
 #ifdef DEBUG_MEMORY_POOLS
 		/* we'll get late corruption if we refill to the wrong pool or double-free */
 		if (*POOL_LINK(pool, ptr) != (void *)pool)
 			*(volatile int *)0 = 0;
 #endif
-		free_list = pool->free_list;
-		do {
-			*POOL_LINK(pool, ptr) = (void *)free_list;
-			__ha_barrier_store();
-		} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr));
-		HA_ATOMIC_SUB(&pool->used, 1);
+		__pool_free(pool, ptr);
 	}
 }
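
With the split in place, a thread-local cache only has to intercept pool_free() after the NULL and DEBUG_MEMORY_POOLS checks and fall back to __pool_free() when it overflows. The sketch below is purely illustrative of that idea, not HAProxy's actual cache (which arrived in later commits): the cache size, the names, and the single-pool assumption are all hypothetical, and _Thread_local stands in for HAProxy's own thread-local storage macro.

#define POOL_CACHE_MAX 16

/* Per-thread stash of freed objects. For brevity this assumes all
 * cached objects belong to the same pool, which a real per-pool
 * cache would not. */
static _Thread_local void *pool_cache[POOL_CACHE_MAX];
static _Thread_local unsigned pool_cache_count;

static inline void pool_free_cached(struct pool_head *pool, void *ptr)
{
	if (pool_cache_count < POOL_CACHE_MAX) {
		/* purely thread-local: no atomics, no barriers */
		pool_cache[pool_cache_count++] = ptr;
		return;
	}
	/* cache full: flush half of it to the shared free list */
	while (pool_cache_count > POOL_CACHE_MAX / 2)
		__pool_free(pool, pool_cache[--pool_cache_count]);
	__pool_free(pool, ptr);
}

The point of the commit is visible here: because __pool_free() skips the validity tests, such a cache layer can run them once in its own fast path and still reuse the lockless commit code unchanged.
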