1
0
mirror of https://github.com/systemd/systemd.git synced 2024-10-29 21:55:36 +03:00

hashmap: expose helper for releasing memory pools independently of valgrind

Let's clean this up and export this always, so that we can later call it
when we are under memory pressure.
This commit is contained in:
Lennart Poettering 2023-02-08 18:03:27 +01:00
parent a133d2c366
commit 9a0f0ef557
4 changed files with 20 additions and 18 deletions

View File

@ -274,28 +274,32 @@ static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX
},
};
#if VALGRIND
_destructor_ static void cleanup_pools(void) {
_cleanup_free_ char *t = NULL;
void hashmap_cleanup_pools(void) {
int r;
/* Be nice to valgrind */
/* The pool is only allocated by the main thread, but the memory can be passed to other
* threads. Let's clean up if we are the main thread and no other threads are live. */
/* The pool is only allocated by the main thread, but the memory can
* be passed to other threads. Let's clean up if we are the main thread
* and no other threads are live. */
/* We build our own is_main_thread() here, which doesn't use C11
* TLS based caching of the result. That's because valgrind apparently
* doesn't like malloc() (which C11 TLS internally uses) to be called
* from a GCC destructors. */
/* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
* result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
if (getpid() != gettid())
return;
return (void) log_debug("Not cleaning up memory pools, not in main thread.");
if (get_process_threads(0) != 1)
return;
r = get_process_threads(0);
if (r < 0)
return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
if (r != 1)
return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
mempool_drop(&hashmap_pool);
mempool_drop(&ordered_hashmap_pool);
}
#if VALGRIND
/* Run at process exit (GCC destructor) in valgrind builds only, so that the
 * memory pools are freed and valgrind does not flag them as still reachable.
 * All the actual logic (main-thread/single-thread checks, pool release) lives
 * in hashmap_cleanup_pools(), which is always compiled in. */
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        hashmap_cleanup_pools();
}
#endif
static unsigned n_buckets(HashmapBase *h) {

View File

@ -443,3 +443,5 @@ DEFINE_TRIVIAL_CLEANUP_FUNC(OrderedHashmap*, ordered_hashmap_free_free_free);
DEFINE_TRIVIAL_CLEANUP_FUNC(IteratedCache*, iterated_cache_free);
#define _cleanup_iterated_cache_free_ _cleanup_(iterated_cache_freep)
void hashmap_cleanup_pools(void);

View File

@ -70,7 +70,6 @@ void mempool_free_tile(struct mempool *mp, void *p) {
mp->freelist = p;
}
#if VALGRIND
void mempool_drop(struct mempool *mp) {
struct pool *p = mp->first_pool;
while (p) {
@ -80,4 +79,3 @@ void mempool_drop(struct mempool *mp) {
p = n;
}
}
#endif

View File

@ -25,6 +25,4 @@ static struct mempool pool_name = { \
__attribute__((weak)) bool mempool_enabled(void);
#if VALGRIND
void mempool_drop(struct mempool *mp);
#endif