Mirror of https://github.com/systemd/systemd.git
basic: implement the IteratedCache
Adds the basics of the IteratedCache, along with constructor support for the Hashmap and OrderedHashmap types.

iterated_cache_get() is responsible for synchronizing the cache with the associated Hashmap and making it available to the caller at the supplied result pointers. Since iterated_cache_get() may need to allocate memory, it may fail, so callers must check the return value. On success, pointer arrays containing pointers to the associated Hashmap's keys and values, in iteration order, are returned in res_keys and res_values, respectively. Either may be supplied as NULL to inhibit caching of the keys or the values. Note that if the cached Hashmap hasn't changed since the previous call to iterated_cache_get(), and the call doesn't newly activate caching of the keys or values, the cost is effectively zero: the returned pointers simply refer to the previously returned arrays as-is.

A cleanup function, iterated_cache_free(), has also been added. It frees only the IteratedCache container and its arrays; the associated Hashmap, its keys, and its values are not affected. Conversely, freeing the associated Hashmap does not automatically free its IteratedCache. In theory, the arrays returned by a successful iterated_cache_get() call, including the referenced keys and values, can still be accessed safely after the associated Hashmap has been freed, provided the iterated_cache_get() was performed before the free and the flavor of hashmap free used didn't also free the keys and/or values.
Commit 45ea84d8ed (parent 84dcca75b4)
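Before the patch itself, a brief usage sketch (not part of the commit): it shows how a caller might combine the new constructor, iterated_cache_get() and iterated_cache_free() with the pre-existing OrderedHashmap helpers. The function name dump_in_insertion_order() and the sample keys/values are purely illustrative; ordered_hashmap_new(), ordered_hashmap_put(), ordered_hashmap_free() and string_hash_ops are existing hashmap APIs, everything else comes from this patch.

/* Illustrative only -- assumes the declarations added to hashmap.h below. */
#include <errno.h>
#include <stdio.h>

#include "hashmap.h"

static int dump_in_insertion_order(void) {
        OrderedHashmap *m;
        IteratedCache *cache;
        const void **keys, **values;
        unsigned n, i;
        int r;

        m = ordered_hashmap_new(&string_hash_ops);
        if (!m)
                return -ENOMEM;

        cache = ordered_hashmap_iterated_cache_new(m);
        if (!cache) {
                ordered_hashmap_free(m);
                return -ENOMEM;
        }

        (void) ordered_hashmap_put(m, "one", (void*) "1");
        (void) ordered_hashmap_put(m, "two", (void*) "2");

        /* May allocate, hence may fail -- the return value must be checked. */
        r = iterated_cache_get(cache, &keys, &values, &n);
        if (r >= 0)
                for (i = 0; i < n; i++)
                        printf("%s=%s\n", (const char*) keys[i], (const char*) values[i]);

        iterated_cache_free(cache);  /* frees only the cache and its arrays */
        ordered_hashmap_free(m);     /* the keys/values themselves are untouched */

        return r;
}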
src/basic/hashmap.c
@@ -229,7 +229,8 @@ struct HashmapBase {
         unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
         bool from_pool:1; /* whether was allocated from mempool */
-        bool dirty:1; /* whether dirtied since cache sync */
+        bool dirty:1; /* whether dirtied since last iterated_cache_get() */
+        bool cached:1; /* whether this hashmap is being cached */
         HASHMAP_DEBUG_FIELDS /* optional hashmap_debug_info */
 };
 
@@ -249,6 +250,17 @@ struct Set {
         struct HashmapBase b;
 };
 
+typedef struct CacheMem {
+        const void **ptr;
+        size_t n_populated, n_allocated;
+        bool active:1;
+} CacheMem;
+
+struct IteratedCache {
+        HashmapBase *hashmap;
+        CacheMem keys, values;
+};
+
 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
 /* No need for a separate Set pool */
@@ -744,6 +756,25 @@ bool set_iterate(Set *s, Iterator *i, void **value) {
              (idx != IDX_NIL); \
              (idx) = hashmap_iterate_entry((h), &(i)))
 
+IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
+        IteratedCache *cache;
+
+        assert(h);
+        assert(!h->cached);
+
+        if (h->cached)
+                return NULL;
+
+        cache = new0(IteratedCache, 1);
+        if (!cache)
+                return NULL;
+
+        cache->hashmap = h;
+        h->cached = true;
+
+        return cache;
+}
+
 static void reset_direct_storage(HashmapBase *h) {
         const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
         void *p;
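A note on the constructor above (commentary, not part of the patch): it returns NULL both when allocation fails and when the hashmap is already cached (the latter also trips the assert in debug builds), so callers can only treat NULL as a generic setup failure. A minimal sketch, assuming the hashmap_iterated_cache_new() wrapper declared later in hashmap.h; attach_cache() is a hypothetical helper:

#include <errno.h>

#include "hashmap.h"

/* Hypothetical helper: NULL from the constructor is reported uniformly as -ENOMEM. */
static int attach_cache(Hashmap *h, IteratedCache **ret) {
        IteratedCache *cache;

        cache = hashmap_iterated_cache_new(h);
        if (!cache)
                return -ENOMEM;

        *ret = cache;
        return 0;
}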
@@ -1866,3 +1897,95 @@ int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags
                 return r;
         }
 }
+
+/* expand the cachemem if needed, return true if newly (re)activated. */
+static int cachemem_maintain(CacheMem *mem, unsigned size) {
+        int r = false;
+
+        assert(mem);
+
+        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
+                if (size > 0)
+                        return -ENOMEM;
+        }
+
+        if (!mem->active)
+                mem->active = r = true;
+
+        return r;
+}
+
+int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
+        bool sync_keys = false, sync_values = false;
+        unsigned size;
+        int r;
+
+        assert(cache);
+        assert(cache->hashmap);
+
+        size = n_entries(cache->hashmap);
+
+        if (res_keys) {
+                r = cachemem_maintain(&cache->keys, size);
+                if (r < 0)
+                        return r;
+
+                sync_keys = r;
+        } else
+                cache->keys.active = false;
+
+        if (res_values) {
+                r = cachemem_maintain(&cache->values, size);
+                if (r < 0)
+                        return r;
+
+                sync_values = r;
+        } else
+                cache->values.active = false;
+
+        if (cache->hashmap->dirty) {
+                if (cache->keys.active)
+                        sync_keys = true;
+                if (cache->values.active)
+                        sync_values = true;
+
+                cache->hashmap->dirty = false;
+        }
+
+        if (sync_keys || sync_values) {
+                unsigned i, idx;
+                Iterator iter;
+
+                i = 0;
+                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
+                        struct hashmap_base_entry *e;
+
+                        e = bucket_at(cache->hashmap, idx);
+
+                        if (sync_keys)
+                                cache->keys.ptr[i] = e->key;
+                        if (sync_values)
+                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
+                        i++;
+                }
+        }
+
+        if (res_keys)
+                *res_keys = cache->keys.ptr;
+        if (res_values)
+                *res_values = cache->values.ptr;
+        if (res_n_entries)
+                *res_n_entries = size;
+
+        return 0;
+}
+
+IteratedCache *iterated_cache_free(IteratedCache *cache) {
+        if (cache) {
+                free(cache->keys.ptr);
+                free(cache->values.ptr);
+                free(cache);
+        }
+
+        return NULL;
+}
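To illustrate the NULL-argument behaviour implemented above (commentary, not part of the patch): passing NULL for res_values deactivates value caching, and a side that is later reactivated is fully resynced because cachemem_maintain() returns true on (re)activation. A keys-only sketch; print_keys() is a hypothetical consumer and string keys are assumed:

#include <stdio.h>

#include "hashmap.h"

/* Hypothetical keys-only consumer: res_values == NULL, so only the key array is cached. */
static void print_keys(IteratedCache *cache) {
        const void **keys;
        unsigned n, i;

        if (iterated_cache_get(cache, &keys, NULL, &n) < 0)
                return;

        for (i = 0; i < n; i++)
                printf("%s\n", (const char*) keys[i]);  /* assumes the keys are strings */
}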
src/basic/hashmap.h
@@ -53,6 +53,8 @@ typedef struct Hashmap Hashmap; /* Maps keys to values */
 typedef struct OrderedHashmap OrderedHashmap; /* Like Hashmap, but also remembers entry insertion order */
 typedef struct Set Set; /* Stores just keys */
 
+typedef struct IteratedCache IteratedCache; /* Caches the iterated order of one of the above */
+
 /* Ideally the Iterator would be an opaque struct, but it is instantiated
  * by hashmap users, so the definition has to be here. Do not use its fields
  * directly. */
@@ -126,6 +128,9 @@ static inline OrderedHashmap *ordered_hashmap_free_free_free(OrderedHashmap *h)
         return (void*)hashmap_free_free_free(PLAIN_HASHMAP(h));
 }
 
+IteratedCache *iterated_cache_free(IteratedCache *cache);
+int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries);
+
 HashmapBase *internal_hashmap_copy(HashmapBase *h);
 static inline Hashmap *hashmap_copy(Hashmap *h) {
         return (Hashmap*) internal_hashmap_copy(HASHMAP_BASE(h));
@@ -139,6 +144,14 @@ int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct h
 #define hashmap_ensure_allocated(h, ops) internal_hashmap_ensure_allocated(h, ops HASHMAP_DEBUG_SRC_ARGS)
 #define ordered_hashmap_ensure_allocated(h, ops) internal_ordered_hashmap_ensure_allocated(h, ops HASHMAP_DEBUG_SRC_ARGS)
 
+IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h);
+static inline IteratedCache *hashmap_iterated_cache_new(Hashmap *h) {
+        return (IteratedCache*) internal_hashmap_iterated_cache_new(HASHMAP_BASE(h));
+}
+static inline IteratedCache *ordered_hashmap_iterated_cache_new(OrderedHashmap *h) {
+        return (IteratedCache*) internal_hashmap_iterated_cache_new(HASHMAP_BASE(h));
+}
+
 int hashmap_put(Hashmap *h, const void *key, void *value);
 static inline int ordered_hashmap_put(OrderedHashmap *h, const void *key, void *value) {
         return hashmap_put(PLAIN_HASHMAP(h), key, value);
@@ -394,3 +407,7 @@ DEFINE_TRIVIAL_CLEANUP_FUNC(OrderedHashmap*, ordered_hashmap_free_free_free);
 #define _cleanup_ordered_hashmap_free_ _cleanup_(ordered_hashmap_freep)
 #define _cleanup_ordered_hashmap_free_free_ _cleanup_(ordered_hashmap_free_freep)
 #define _cleanup_ordered_hashmap_free_free_free_ _cleanup_(ordered_hashmap_free_free_freep)
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(IteratedCache*, iterated_cache_free);
+
+#define _cleanup_iterated_cache_free_ _cleanup_(iterated_cache_freep)
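Finally, the cleanup glue added above enables the usual scope-based pattern; a minimal sketch (not part of the patch), with first_key() as a hypothetical helper that assumes the map is not already cached and uses string keys:

#include <errno.h>

#include "hashmap.h"

/* Hypothetical helper: the cache is freed automatically on return;
 * the returned key pointer is owned by the hashmap, not by the cache. */
static int first_key(OrderedHashmap *m, const char **ret) {
        _cleanup_iterated_cache_free_ IteratedCache *cache = NULL;
        const void **keys;
        unsigned n;
        int r;

        cache = ordered_hashmap_iterated_cache_new(m);
        if (!cache)
                return -ENOMEM;

        r = iterated_cache_get(cache, &keys, NULL, &n);
        if (r < 0)
                return r;

        *ret = n > 0 ? (const char*) keys[0] : NULL;
        return 0;
}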