
Merge pull request #26443 from poettering/mempool-fixes

mempool tweaks
Lennart Poettering committed 2023-02-17 17:15:35 +01:00 (via GitHub)
Commit: 6cbb864d37
6 changed files with 227 additions and 39 deletions

src/basic/hashmap.c

@@ -274,27 +274,31 @@ static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX
},
};
void hashmap_trim_pools(void) {
int r;
/* The pool is only allocated by the main thread, but the memory can be passed to other
* threads. Let's clean up if we are the main thread and no other threads are live. */
/* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
* result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
if (getpid() != gettid())
return (void) log_debug("Not cleaning up memory pools, not in main thread.");
r = get_process_threads(0);
if (r < 0)
return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
if (r != 1)
return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
mempool_trim(&hashmap_pool);
mempool_trim(&ordered_hashmap_pool);
}
#if VALGRIND
_destructor_ static void cleanup_pools(void) {
_cleanup_free_ char *t = NULL;
/* Be nice to valgrind */
/* The pool is only allocated by the main thread, but the memory can
* be passed to other threads. Let's clean up if we are the main thread
* and no other threads are live. */
/* We build our own is_main_thread() here, which doesn't use C11
* TLS based caching of the result. That's because valgrind apparently
* doesn't like malloc() (which C11 TLS internally uses) to be called
* from a GCC destructor. */
if (getpid() != gettid())
return;
if (get_process_threads(0) != 1)
return;
mempool_drop(&hashmap_pool);
mempool_drop(&ordered_hashmap_pool);
hashmap_trim_pools();
}
#endif
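
For illustration, a minimal sketch of how a caller might use the new hashmap_trim_pools() entry point; the flush_cache() helper and its Hashmap are hypothetical and not part of this change:

/* Hypothetical caller: after dropping a large hashmap, hand any now-unused
 * mempool pages back to libc. hashmap_trim_pools() is a no-op when not run
 * from the main thread or when other threads are still alive, as shown above. */
#include "hashmap.h"

static void flush_cache(Hashmap **cache) {
        *cache = hashmap_free(*cache);
        hashmap_trim_pools();
}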

src/basic/hashmap.h

@@ -443,3 +443,5 @@ DEFINE_TRIVIAL_CLEANUP_FUNC(OrderedHashmap*, ordered_hashmap_free_free_free);
DEFINE_TRIVIAL_CLEANUP_FUNC(IteratedCache*, iterated_cache_free);
#define _cleanup_iterated_cache_free_ _cleanup_(iterated_cache_freep)
void hashmap_trim_pools(void);

src/basic/mempool.c

@@ -3,6 +3,7 @@
#include <stdint.h>
#include <stdlib.h>
#include "format-util.h"
#include "macro.h"
#include "memory-util.h"
#include "mempool.h"
@@ -13,21 +14,26 @@ struct pool {
size_t n_used;
};
static void* pool_ptr(struct pool *p) {
return ((uint8_t*) ASSERT_PTR(p)) + ALIGN(sizeof(struct pool));
}
void* mempool_alloc_tile(struct mempool *mp) {
size_t i;
/* When a tile is released we add it to the list and simply
* place the next pointer at its offset 0. */
assert(mp);
assert(mp->tile_size >= sizeof(void*));
assert(mp->at_least > 0);
if (mp->freelist) {
void *r;
void *t;
r = mp->freelist;
mp->freelist = * (void**) mp->freelist;
return r;
t = mp->freelist;
mp->freelist = *(void**) mp->freelist;
return t;
}
if (_unlikely_(!mp->first_pool) ||
@@ -53,7 +59,7 @@ void* mempool_alloc_tile(struct mempool *mp) {
i = mp->first_pool->n_used++;
return ((uint8_t*) mp->first_pool) + ALIGN(sizeof(struct pool)) + i*mp->tile_size;
return (uint8_t*) pool_ptr(mp->first_pool) + i*mp->tile_size;
}
void* mempool_alloc0_tile(struct mempool *mp) {
@@ -65,19 +71,103 @@ void* mempool_alloc0_tile(struct mempool *mp) {
return p;
}
void mempool_free_tile(struct mempool *mp, void *p) {
* (void**) p = mp->freelist;
void* mempool_free_tile(struct mempool *mp, void *p) {
assert(mp);
if (!p)
return NULL;
*(void**) p = mp->freelist;
mp->freelist = p;
return NULL;
}
#if VALGRIND
void mempool_drop(struct mempool *mp) {
struct pool *p = mp->first_pool;
while (p) {
struct pool *n;
n = p->next;
free(p);
p = n;
static bool pool_contains(struct mempool *mp, struct pool *p, void *ptr) {
size_t off;
void *a;
assert(mp);
assert(p);
if (!ptr)
return false;
a = pool_ptr(p);
if ((uint8_t*) ptr < (uint8_t*) a)
return false;
off = (uint8_t*) ptr - (uint8_t*) a;
assert(off % mp->tile_size == 0);
return off < mp->tile_size * p->n_tiles;
}
static bool pool_is_unused(struct mempool *mp, struct pool *p) {
assert(mp);
assert(p);
if (p->n_used == 0)
return true;
/* Check if all tiles in this specific pool are in the freelist. */
size_t n = 0;
void *i = mp->freelist;
while (i) {
if (pool_contains(mp, p, i))
n++;
i = *(void**) i;
}
assert(n <= p->n_used);
return n == p->n_used;
}
static void pool_unlink(struct mempool *mp, struct pool *p) {
size_t m = 0;
assert(mp);
assert(p);
if (p->n_used == 0)
return;
void **i = &mp->freelist;
while (*i) {
void *d = *i;
if (pool_contains(mp, p, d)) {
*i = *(void**) d;
m++;
if (m == p->n_used)
break;
} else
i = (void**) d;
}
}
#endif
void mempool_trim(struct mempool *mp) {
size_t trimmed = 0, left = 0;
assert(mp);
struct pool **p = &mp->first_pool;
while (*p) {
struct pool *d = *p;
if (pool_is_unused(mp, d)) {
trimmed += d->n_tiles * mp->tile_size;
pool_unlink(mp, d);
*p = d->next;
free(d);
} else {
left += d->n_tiles * mp->tile_size;
p = &d->next;
}
}
log_debug("Trimmed %s from memory pool %p. (%s left)", FORMAT_BYTES(trimmed), mp, FORMAT_BYTES(left));
}
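
As background for the code above, the pool's freelist is intrusive: a released tile stores the pointer to the next free tile in its own first bytes, which is why tile_size must be at least sizeof(void*). A standalone sketch of that idea (illustrative only, not part of the commit):

#include <assert.h>
#include <stddef.h>

static void *freelist = NULL;

/* Release: link the tile in at its offset 0, no extra bookkeeping memory needed. */
static void tile_release(void *tile, size_t tile_size) {
        assert(tile_size >= sizeof(void*));
        *(void**) tile = freelist;
        freelist = tile;
}

/* Acquire: pop the head of the list, or return NULL if it is empty. */
static void* tile_acquire(void) {
        void *t = freelist;
        if (t)
                freelist = *(void**) t;
        return t;
}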

src/basic/mempool.h

@@ -10,12 +10,12 @@ struct mempool {
struct pool *first_pool;
void *freelist;
size_t tile_size;
unsigned at_least;
size_t at_least;
};
void* mempool_alloc_tile(struct mempool *mp);
void* mempool_alloc0_tile(struct mempool *mp);
void mempool_free_tile(struct mempool *mp, void *p);
void* mempool_free_tile(struct mempool *mp, void *p);
#define DEFINE_MEMPOOL(pool_name, tile_type, alloc_at_least) \
static struct mempool pool_name = { \
@@ -25,6 +25,4 @@ static struct mempool pool_name = { \
__attribute__((weak)) bool mempool_enabled(void);
#if VALGRIND
void mempool_drop(struct mempool *mp);
#endif
void mempool_trim(struct mempool *mp);
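
A rough usage sketch of the declarations above (MyObject and my_pool are invented names): mempool_free_tile() now returns NULL so the caller can clear its pointer in the same statement, and mempool_trim() releases any pool whose tiles have all returned to the freelist:

#include <stdint.h>
#include "mempool.h"

typedef struct MyObject {
        uint64_t id;
} MyObject;

DEFINE_MEMPOOL(my_pool, MyObject, 64);   /* grow in steps of at least 64 tiles */

static void example(void) {
        MyObject *o = mempool_alloc0_tile(&my_pool);   /* zero-initialized tile */
        if (!o)
                return;

        o->id = 4711;
        o = mempool_free_tile(&my_pool, o);            /* returns NULL, clearing 'o' */
        mempool_trim(&my_pool);                        /* drop fully unused pools */
}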

src/test/meson.build

@@ -509,6 +509,8 @@ tests += [
[files('test-tpm2.c')],
[files('test-mempool.c')],
[files('test-replace-var.c')],
[files('test-calendarspec.c')],

src/test/test-mempool.c (new file, 92 lines)

@@ -0,0 +1,92 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#include "mempool.h"
#include "random-util.h"
#include "tests.h"
struct element {
uint64_t value;
};
DEFINE_MEMPOOL(test_mempool, struct element, 8);
TEST(mempool_trim) {
#define NN 4000
struct element *a[NN];
size_t n_freed = 0;
assert_se(!test_mempool.first_pool);
assert_se(!test_mempool.freelist);
mempool_trim(&test_mempool);
for (size_t i = 0; i < NN; i++) {
assert_se(a[i] = mempool_alloc_tile(&test_mempool));
a[i]->value = i;
}
mempool_trim(&test_mempool);
/* free up to one third randomly */
size_t x = 0;
for (size_t i = 0; i < NN/3; i++) {
x = (x + random_u64()) % ELEMENTSOF(a);
assert_se(!a[x] || a[x]->value == x);
if (a[x])
n_freed ++;
a[x] = mempool_free_tile(&test_mempool, a[x]);
}
mempool_trim(&test_mempool);
/* free definitely at least one third */
for (size_t i = 2; i < NN; i += 3) {
assert_se(!a[i] || a[i]->value == i);
if (a[i])
n_freed ++;
a[i] = mempool_free_tile(&test_mempool, a[i]);
}
mempool_trim(&test_mempool);
/* Allocate another set of tiles, which will fill up the free list and allocate some new tiles */
struct element *b[NN];
for (size_t i = 0; i < NN; i++) {
assert_se(b[i] = mempool_alloc_tile(&test_mempool));
b[i]->value = ~(uint64_t) i;
}
mempool_trim(&test_mempool);
/* free everything from the original set */
for (size_t i = 0; i < NN; i += 1) {
assert_se(!a[i] || a[i]->value == i);
if (a[i])
n_freed ++;
a[i] = mempool_free_tile(&test_mempool, a[i]);
}
mempool_trim(&test_mempool);
/* and now everything from the second set too */
for (size_t i = 0; i < NN; i += 1) {
assert_se(!b[i] || b[i]->value == ~(uint64_t) i);
if (b[i])
n_freed ++;
b[i] = mempool_free_tile(&test_mempool, b[i]);
}
assert_se(n_freed == NN * 2);
mempool_trim(&test_mempool);
assert_se(!test_mempool.first_pool);
assert_se(!test_mempool.freelist);
}
DEFINE_TEST_MAIN(LOG_DEBUG);