mem-pool: initialize pthread_key_t pool_key in mem_pools_init_early()
It is not possible to call pthread_key_delete() for the pool_key that is initialized in the constructor for the memory pools. This makes it difficult to do a full cleanup of all the resources in mem_pools_fini(). For this, the initialization of pool_key should be moved to mem_pools_init().

However, the glusterfsd binary has a rather complex initialization procedure. The memory pools need to be initialized partially so that mem_get() works, but the pool_sweeper thread can get killed if it is started before glusterfsd daemonizes. To solve this, mem_pools_init() is split into two pieces:

1. mem_pools_init_early() for initializing the basic structures
2. mem_pools_init_late() to start the pool_sweeper thread

With the split of mem_pools_init(), and with pthread_key_create() placed in mem_pools_init_early(), it is now possible to correctly clean up the pool_key with pthread_key_delete() in mem_pools_fini().

It also turns out there was no memory pool initialization in the CLI; this has been added as well. Without it, the CLI cannot call mem_get() successfully, which results in a hang of the process.

Change-Id: I1de0153dfe600fd79eac7468cc070e4bd35e71dd
BUG: 1470170
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: https://review.gluster.org/17779
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
Parent: acdbdaeba2
Commit: 1e8e626403
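The hunks below touch gfapi, the CLI and glusterfsd. As a rough illustration of the calling order the commit message describes, here is a minimal C sketch (not taken from the patch) of how a daemonizing caller is expected to use the split API. It assumes a build inside the glusterfs tree where mem-pool.h is available; daemonize_somehow() is a hypothetical placeholder for the real daemonization step.

/* Minimal sketch of the intended calling sequence, not part of the patch. */
#include <stdlib.h>
#include "mem-pool.h"   /* mem_pools_init_early/_late, mem_pools_fini */

int
main (int argc, char *argv[])
{
        (void) argc;
        (void) argv;

        /* basic structures + pool_key: mem_get() works from here on */
        mem_pools_init_early ();

        /* ... argument parsing, daemonize_somehow (), etc. The pool_sweeper
         * thread must not exist yet, otherwise it would be lost when the
         * parent process exits during daemonizing. */

        /* threading is settled now, safe to start the pool_sweeper */
        mem_pools_init_late ();

        /* ... normal operation using mem_get()/mem_put() ... */

        /* stops the sweeper and deletes pool_key */
        mem_pools_fini ();
        return EXIT_SUCCESS;
}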
@@ -749,7 +749,8 @@ pub_glfs_new (const char *volname)
          * Do this as soon as possible in case something else depends on
          * pool allocations.
          */
-        mem_pools_init ();
+        mem_pools_init_early ();
+        mem_pools_init_late ();
 
         fs = glfs_new_fs (volname);
         if (!fs)
@@ -722,6 +722,9 @@ main (int argc, char *argv[])
         int ret = -1;
         glusterfs_ctx_t *ctx = NULL;
 
+        mem_pools_init_early ();
+        mem_pools_init_late ();
+
         ctx = glusterfs_ctx_new ();
         if (!ctx)
                 return ENOMEM;
@@ -786,6 +789,8 @@ main (int argc, char *argv[])
 out:
 //        glusterfs_ctx_destroy (ctx);
 
+        mem_pools_fini ();
+
         return ret;
 }
 
@@ -2417,6 +2417,8 @@ main (int argc, char *argv[])
         char cmdlinestr[PATH_MAX] = {0,};
         cmd_args_t *cmd = NULL;
 
+        mem_pools_init_early ();
+
         gf_check_and_set_mem_acct (argc, argv);
 
         ctx = glusterfs_ctx_new ();
@@ -2493,7 +2495,7 @@ main (int argc, char *argv[])
          * the parent, but we want to do it as soon as possible after that in
          * case something else depends on pool allocations.
          */
-        mem_pools_init ();
+        mem_pools_init_late ();
 
 #ifdef GF_LINUX_HOST_OS
         ret = set_oom_score_adj (ctx);
@@ -519,12 +519,6 @@ mem_pools_preinit (void)
 {
         unsigned int i;
 
-        /* Use a pthread_key destructor to clean up when a thread exits. */
-        if (pthread_key_create (&pool_key, pool_destructor) != 0) {
-                gf_log ("mem-pool", GF_LOG_CRITICAL,
-                        "failed to initialize mem-pool key");
-        }
-
         INIT_LIST_HEAD (&pool_threads);
         INIT_LIST_HEAD (&pool_free_threads);
 
@@ -546,8 +540,34 @@ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
 static unsigned int init_count = 0;
 static pthread_t sweeper_tid;
 
+/* Use the mem_pools_init_early() function for basic initialization. There
+ * will be no cleanup done by the pool_sweeper thread until
+ * mem_pools_init_late() has been called. Calling mem_get() will be possible
+ * after this function has set up the basic structures. */
 void
-mem_pools_init (void)
+mem_pools_init_early (void)
+{
+        pthread_mutex_lock (&init_mutex);
+        /* Use a pthread_key destructor to clean up when a thread exits.
+         *
+         * We won't increase init_count here, that is only done when the
+         * pool_sweeper thread is started too.
+         */
+        if (pthread_getspecific (pool_key) == NULL) {
+                /* key has not been created yet */
+                if (pthread_key_create (&pool_key, pool_destructor) != 0) {
+                        gf_log ("mem-pool", GF_LOG_CRITICAL,
+                                "failed to initialize mem-pool key");
+                }
+        }
+        pthread_mutex_unlock (&init_mutex);
+}
+
+/* Call mem_pools_init_late() once threading has been configured completely.
+ * This prevents the pool_sweeper thread from getting killed once the main()
+ * thread exits during daemonizing. */
+void
+mem_pools_init_late (void)
 {
         pthread_mutex_lock (&init_mutex);
         if ((init_count++) == 0) {
@@ -565,17 +585,30 @@ mem_pools_fini (void)
         case 0:
                 /*
                  * If init_count is already zero (as e.g. if somebody called
-                 * this before mem_pools_init) then the sweeper was probably
-                 * never even started so we don't need to stop it. Even if
-                 * there's some crazy circumstance where there is a sweeper but
-                 * init_count is still zero, that just means we'll leave it
-                 * running. Not perfect, but far better than any known
-                 * alternative.
+                 * this before mem_pools_init_late) then the sweeper was
+                 * probably never even started so we don't need to stop it.
+                 * Even if there's some crazy circumstance where there is a
+                 * sweeper but init_count is still zero, that just means we'll
+                 * leave it running. Not perfect, but far better than any
+                 * known alternative.
                  */
                 break;
         case 1:
+                /* if only mem_pools_init_early() was called, sweeper_tid will
+                 * be invalid and the functions will error out. That is not
+                 * critical. In all other cases, the sweeper_tid will be valid
+                 * and the thread gets stopped. */
                 (void) pthread_cancel (sweeper_tid);
                 (void) pthread_join (sweeper_tid, NULL);
+
+                /* Need to clean the pool_key to prevent further usage of the
+                 * per_thread_pool_list_t structure that is stored for each
+                 * thread.
+                 * This also prevents calling pool_destructor() when a thread
+                 * exits, so there is no chance of a use-after-free of the
+                 * per_thread_pool_list_t structure. */
+                (void) pthread_key_delete (pool_key);
+
                 /* Fall through. */
         default:
                 --init_count;
@@ -584,7 +617,8 @@ mem_pools_fini (void)
 }
 
 #else
-void mem_pools_init (void) {}
+void mem_pools_init_early (void) {}
+void mem_pools_init_late (void) {}
 void mem_pools_fini (void) {}
 #endif
 
@@ -256,8 +256,9 @@ struct mem_pool {
         gf_atomic_t frees_to_list;
 };
 
-void mem_pools_init (void);
-void mem_pools_fini (void);
+void mem_pools_init_early (void);  /* basic initialization of memory pools */
+void mem_pools_init_late (void);   /* start the pool_sweeper thread */
+void mem_pools_fini (void);        /* cleanup memory pools */
 
 struct mem_pool *
 mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name);
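For reference, the pthread_key lifecycle that mem_pools_fini() now relies on can be shown in a self-contained sketch: a key created with a destructor, per-thread data attached via pthread_setspecific(), and pthread_key_delete() disarming the destructor so it no longer runs for threads that exit afterwards. The names (worker, destructor) are illustrative only and not from the glusterfs sources.

/* Standalone illustration of the pthread_key lifecycle (compile with -pthread). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_key_t key;

static void
destructor (void *value)
{
        /* Runs when a thread exits with a non-NULL value bound to the key,
         * unless the key has already been deleted. */
        printf ("cleaning up %s\n", (char *) value);
        free (value);
}

static void *
worker (void *arg)
{
        (void) arg;
        pthread_setspecific (key, strdup ("worker data"));
        return NULL;   /* destructor() fires for this thread */
}

int
main (void)
{
        pthread_t tid;

        if (pthread_key_create (&key, destructor) != 0)
                return EXIT_FAILURE;

        pthread_create (&tid, NULL, worker, NULL);
        pthread_join (tid, NULL);

        /* After this, exiting threads no longer trigger destructor(), which
         * is the property mem_pools_fini() uses to avoid a use-after-free of
         * per_thread_pool_list_t. */
        pthread_key_delete (key);
        return EXIT_SUCCESS;
}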