mem-pool: Add api to mem_get based on requested size

Currently the mem-pool implementation provides an API to get memory
from the mem pool based on the struct type; this retains API
compatibility with the older mem-pool implementation. Internally,
the mem-pool structure maps each struct type to one of the
size-based pools.

This patch adds new APIs to fetch memory from the mem pool given a
requested size.
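
For illustration, caller-side usage might look like the sketch below
(not part of this patch; note that *hit is only updated in DEBUG
builds, as the implementation shows):

    gf_boolean_t hit = _gf_false;
    void *buf = mem_pool_get(512, &hit); /* mem_pool_get0() also zeroes */

    if (buf) {
        /* ... use up to 512 bytes ... */
        mem_put(buf); /* released the same way as mem_get() objects */
    }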

Change-Id: Ib220ee45ebd134a7be8f6482db5a592dbb7b9211
Updates: #325
Signed-off-by: Poornima G <pgurusid@redhat.com>
Authored by Poornima G on 2018-11-21 10:01:08 +05:30; committed by Amar Tumballi
parent 822779332e
commit 9ff080382c
4 changed files with 127 additions and 27 deletions

libglusterfs/src/glusterfs/common-utils.h

@@ -151,11 +151,6 @@ trap(void);
 #define GF_THREAD_NAME_PREFIX "glfs_"
 #define GF_THREAD_NAME_PREFIX_LEN 5
-#include <stdbool.h>
-#define gf_boolean_t bool
-#define _gf_false false
-#define _gf_true true
 /*
  * we could have initialized these as +ve values and treated
  * them as negative while comparing etc.. (which would have

libglusterfs/src/glusterfs/glusterfs.h

@@ -14,6 +14,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <stdlib.h>
+#include <stdbool.h>
 #include <netinet/in.h>
 #include <sys/socket.h>
 #include <sys/types.h>
@@ -369,6 +370,10 @@ enum gf_internal_fop_indicator {
 #define GF_CS_OBJECT_STATUS "trusted.glusterfs.cs.status"
 #define GF_CS_OBJECT_REPAIR "trusted.glusterfs.cs.repair"
+#define gf_boolean_t bool
+#define _gf_false false
+#define _gf_true true
 typedef enum {
     GF_CS_LOCAL = 1,
     GF_CS_REMOTE = 2,

libglusterfs/src/glusterfs/mem-pool.h

@@ -38,6 +38,10 @@
 #define GF_MEM_TRAILER_MAGIC 0xBAADF00D
 #define GF_MEM_INVALID_MAGIC 0xDEADC0DE
+#define POOL_SMALLEST 7 /* i.e. 128 */
+#define POOL_LARGEST 20 /* i.e. 1048576 */
+#define NPOOLS (POOL_LARGEST - POOL_SMALLEST + 1)
 struct mem_acct_rec {
     const char *typestr;
     uint64_t size;
@@ -207,7 +211,10 @@ struct mem_pool {
     unsigned long count; /* requested pool size (unused) */
     char *name;
     gf_atomic_t active; /* current allocations */
+#ifdef DEBUG
+    gf_atomic_t hit; /* number of allocations served from pt_pool */
+    gf_atomic_t miss; /* number of std allocs due to miss */
+#endif
     struct list_head owner; /* glusterfs_ctx_t->mempool_list */
     glusterfs_ctx_t *ctx; /* take ctx->lock when updating owner */
@@ -224,7 +231,7 @@ typedef struct pooled_obj_hdr {
     struct mem_pool *pool;
 } pooled_obj_hdr_t;
-#define AVAILABLE_SIZE(p2) ((1 << (p2)) - sizeof(pooled_obj_hdr_t))
+#define AVAILABLE_SIZE(p2) (1 << (p2))
 typedef struct per_thread_pool {
     /* the pool that was used to request this allocation */
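
A note on the AVAILABLE_SIZE change above (annotation, not part of the
patch): the object header no longer eats into the power-of-two budget,
because the chunk is now malloc'd as class size plus header (see the
malloc change in mem-pool.c below). For the smallest class:

    /* before: AVAILABLE_SIZE(7) == 128 - sizeof(pooled_obj_hdr_t)
     * after:  AVAILABLE_SIZE(7) == 128, with the chunk allocated as
     *         128 + sizeof(pooled_obj_hdr_t) */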
@@ -301,4 +308,15 @@ mem_pool_destroy(struct mem_pool *pool);
 void
 gf_mem_acct_enable_set(void *ctx);
+/* hit will be set to :
+ * _gf_true if the memory is served from mem pool
+ * _gf_false if the requested size was not present in mem pool and hence
+ * std alloc'd.
+ */
+void *
+mem_pool_get(unsigned long sizeof_type, gf_boolean_t *hit);
+
+void *
+mem_pool_get0(unsigned long sizeof_type, gf_boolean_t *hit);
+
 #endif /* _MEM_POOL_H */

libglusterfs/src/mem-pool.c

@@ -373,10 +373,6 @@ free:
     FREE(ptr);
 }
-#define POOL_SMALLEST 7 /* i.e. 128 */
-#define POOL_LARGEST 20 /* i.e. 1048576 */
-#define NPOOLS (POOL_LARGEST - POOL_SMALLEST + 1)
 static pthread_key_t pool_key;
 static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
 static struct list_head pool_threads;
@@ -696,7 +692,7 @@ struct mem_pool *
 mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
                 unsigned long count, char *name)
 {
-    unsigned int i;
+    unsigned int power;
     struct mem_pool *new = NULL;
     struct mem_pool_shared *pool = NULL;
@@ -706,18 +702,16 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
         return NULL;
     }
-    for (i = 0; i < NPOOLS; ++i) {
-        if (sizeof_type <= AVAILABLE_SIZE(pools[i].power_of_two)) {
-            pool = &pools[i];
-            break;
-        }
-    }
-    if (!pool) {
+    /* We ensure sizeof_type > 1 and the next power of two will be, at least,
+     * 2^POOL_SMALLEST */
+    sizeof_type |= (1 << POOL_SMALLEST) - 1;
+    power = sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1;
+    if (power > POOL_LARGEST) {
         gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                          "invalid argument");
         return NULL;
     }
+    pool = &pools[power - POOL_SMALLEST];
 
     new = GF_CALLOC(sizeof(struct mem_pool), 1, gf_common_mt_mem_pool);
     if (!new)
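
To make the rounding above concrete, here is a standalone sketch (an
illustration, not part of the patch; size_class() is a hypothetical
helper) that mirrors the arithmetic:

    #include <stdio.h>

    #define POOL_SMALLEST 7  /* 2^7 = 128 */
    #define POOL_LARGEST 20  /* 2^20 = 1048576 */

    /* Same computation as mem_pool_new_fn()/mem_pool_get(): force at
     * least the smallest class, then derive the power from the highest
     * set bit of (sizeof_type - 1). */
    static unsigned int
    size_class(unsigned long sizeof_type)
    {
        sizeof_type |= (1 << POOL_SMALLEST) - 1;
        return sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1;
    }

    int
    main(void)
    {
        printf("%u\n", size_class(100));  /* 8: any request <= 127 ORs to
                                             127 and lands in the 2^8 class */
        printf("%u\n", size_class(4096)); /* 14: the 2^14 class; anything
                                             yielding power > 20 is rejected */
        return 0;
    }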
@@ -729,6 +723,10 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
     new->name = name;
     new->pool = pool;
     GF_ATOMIC_INIT(new->active, 0);
+#ifdef DEBUG
+    GF_ATOMIC_INIT(new->hit, 0);
+    GF_ATOMIC_INIT(new->miss, 0);
+#endif
     INIT_LIST_HEAD(&new->owner);
 
     LOCK(&ctx->lock);
@@ -807,7 +805,8 @@ mem_get_pool_list(void)
 }
 
 pooled_obj_hdr_t *
-mem_get_from_pool(struct mem_pool *mem_pool)
+mem_get_from_pool(struct mem_pool *mem_pool, struct mem_pool_shared *pool,
+                  gf_boolean_t *hit)
 {
     per_thread_pool_list_t *pool_list;
     per_thread_pool_t *pt_pool;
@@ -818,7 +817,16 @@
         return NULL;
     }
 
-    pt_pool = &pool_list->pools[mem_pool->pool->power_of_two - POOL_SMALLEST];
+    if (mem_pool) {
+        pt_pool = &pool_list
+                       ->pools[mem_pool->pool->power_of_two - POOL_SMALLEST];
+    } else {
+        pt_pool = &pool_list->pools[pool->power_of_two - POOL_SMALLEST];
+    }
+
+#ifdef DEBUG
+    *hit = _gf_true;
+#endif
 
     (void)pthread_spin_lock(&pool_list->lock);
@@ -836,15 +844,30 @@
         } else {
             (void)pthread_spin_unlock(&pool_list->lock);
             GF_ATOMIC_INC(pt_pool->parent->allocs_stdc);
-            retval = malloc(1 << pt_pool->parent->power_of_two);
+            retval = malloc((1 << pt_pool->parent->power_of_two) +
+                            sizeof(pooled_obj_hdr_t));
+#ifdef DEBUG
+            *hit = _gf_false;
+#endif
         }
     }
 
     if (retval != NULL) {
+        if (mem_pool) {
+            retval->pool = mem_pool;
+            retval->power_of_two = mem_pool->pool->power_of_two;
+#ifdef DEBUG
+            if (*hit == _gf_true)
+                GF_ATOMIC_INC(mem_pool->hit);
+            else
+                GF_ATOMIC_INC(mem_pool->miss);
+#endif
+        } else {
+            retval->power_of_two = pool->power_of_two;
+            retval->pool = NULL;
+        }
         retval->magic = GF_MEM_HEADER_MAGIC;
-        retval->pool = mem_pool;
         retval->pool_list = pool_list;
-        retval->power_of_two = mem_pool->pool->power_of_two;
     }
 
     return retval;
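
The two calling modes of mem_get_from_pool() after this hunk can be
summarized as follows (annotation, not part of the patch):

    /* mem_get_from_pool(mp, NULL, &hit)   - type-based (mem_get): the
     *     header records mp; class from mp->pool->power_of_two
     * mem_get_from_pool(NULL, pool, &hit) - size-based (mem_pool_get):
     *     header pool stays NULL; class from pool->power_of_two
     * mem_put() later uses the header's pool field to tell them apart. */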
@@ -857,6 +880,7 @@ mem_get(struct mem_pool *mem_pool)
     return GF_MALLOC(mem_pool->sizeof_type, gf_common_mt_mem_pool);
 #else
     pooled_obj_hdr_t *retval;
+    gf_boolean_t hit;
 
     if (!mem_pool) {
         gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
@@ -864,7 +888,7 @@ mem_get(struct mem_pool *mem_pool)
         return NULL;
     }
 
-    retval = mem_get_from_pool(mem_pool);
+    retval = mem_get_from_pool(mem_pool, NULL, &hit);
     if (!retval) {
         return NULL;
     }
@@ -875,6 +899,63 @@
 #endif /* GF_DISABLE_MEMPOOL */
 }
 
+void *
+mem_pool_get(unsigned long sizeof_type, gf_boolean_t *hit)
+{
+#if defined(GF_DISABLE_MEMPOOL)
+    return GF_MALLOC(sizeof_type, gf_common_mt_mem_pool);
+#else
+    pooled_obj_hdr_t *retval;
+    unsigned int power;
+    struct mem_pool_shared *pool = NULL;
+
+    if (!sizeof_type) {
+        gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+                         "invalid argument");
+        return NULL;
+    }
+
+    /* We ensure sizeof_type > 1 and the next power of two will be, at least,
+     * 2^POOL_SMALLEST */
+    sizeof_type |= (1 << POOL_SMALLEST) - 1;
+    power = sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1;
+    if (power > POOL_LARGEST) {
+        gf_msg_callingfn("mem-pool", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+                         "invalid argument");
+        return NULL;
+    }
+    pool = &pools[power - POOL_SMALLEST];
+
+    retval = mem_get_from_pool(NULL, pool, hit);
+
+    return retval + 1;
+#endif /* GF_DISABLE_MEMPOOL */
+}
+
+void *
+mem_pool_get0(unsigned long sizeof_type, gf_boolean_t *hit)
+{
+    void *ptr = NULL;
+    unsigned int power;
+    struct mem_pool_shared *pool = NULL;
+
+    ptr = mem_pool_get(sizeof_type, hit);
+    if (ptr) {
+#if defined(GF_DISABLE_MEMPOOL)
+        memset(ptr, 0, sizeof_type);
+#else
+        /* We ensure sizeof_type > 1 and the next power of two will be, at
+         * least, 2^POOL_SMALLEST */
+        sizeof_type |= (1 << POOL_SMALLEST) - 1;
+        power = sizeof(sizeof_type) * 8 - __builtin_clzl(sizeof_type - 1) + 1;
+        pool = &pools[power - POOL_SMALLEST];
+        memset(ptr, 0, AVAILABLE_SIZE(pool->power_of_two));
+#endif
+    }
+
+    return ptr;
+}
+
 void
 mem_put(void *ptr)
 {
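
Worth noting on mem_pool_get0() above: the object header does not
record the original request size, so the zeroing variant re-derives the
size class and clears the whole payload rather than just sizeof_type
bytes. For example (values per the rounding shown earlier):

    /* sizeof_type == 200 rounds to power 9, so:
     * memset(ptr, 0, AVAILABLE_SIZE(9));  zeroes 512 bytes, not 200 */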
@@ -899,7 +980,8 @@ mem_put(void *ptr)
     pool_list = hdr->pool_list;
     pt_pool = &pool_list->pools[hdr->power_of_two - POOL_SMALLEST];
 
-    GF_ATOMIC_DEC(hdr->pool->active);
+    if (hdr->pool)
+        GF_ATOMIC_DEC(hdr->pool->active);
 
     hdr->magic = GF_MEM_INVALID_MAGIC;
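
For context, a sketch of the round trip for a size-based allocation
(the `ptr - 1` header recovery mirrors how mem_put() locates the
header; that code sits outside this hunk, so treat it as an
assumption):

    gf_boolean_t hit = _gf_false;
    void *ptr = mem_pool_get(512, &hit); /* returns retval + 1: the address
                                            just past the object header */
    pooled_obj_hdr_t *hdr = (pooled_obj_hdr_t *)ptr - 1;

    /* For size-based allocations hdr->pool == NULL, which is why the
     * hunk above guards GF_ATOMIC_DEC(hdr->pool->active). */
    mem_put(ptr);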