CLEANUP: pools: rename all pool functions and pointers to remove this "2"
During the migration to the second version of the pools, the new functions and pool pointers were all called "pool_something2()" and "pool2_something". Now there's no more pool v1 code and it's a real pain to still have to deal with this. Let's clean this up now by removing the "2" everywhere, and by renaming the pool heads "pool_head_something".
parent fbc74e8556
commit bafbe01028
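In practice the whole patch is a mechanical rename: pool_alloc2(), pool_free2(), pool_flush2(), pool_gc2() and pool_destroy2() become pool_alloc(), pool_free(), pool_flush(), pool_gc() and pool_destroy(), and every "pool2_foo" pool head becomes "pool_head_foo". The short sketch below is illustrative only and is not part of the commit; the "example" pool, its struct and the function names are placeholders showing what a typical call site looks like after the rename.

/* --- illustrative sketch, not part of the commit --- */
#include <common/memory.h>   /* create_pool(), pool_alloc(), pool_free() */

struct example {
	int value;
};

struct pool_head *pool_head_example;   /* was: struct pool_head *pool2_example; */

static int init_example(void)
{
	pool_head_example = create_pool("example", sizeof(struct example), MEM_F_SHARED);
	return pool_head_example != NULL;
}

static void use_example(void)
{
	struct example *e = pool_alloc(pool_head_example);   /* was: pool_alloc2(pool2_example) */

	if (!e)
		return;
	e->value = 42;
	pool_free(pool_head_example, e);                      /* was: pool_free2(pool2_example, e) */
}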
@@ -192,7 +192,7 @@ list of options is :
 difficult to troubleshoot.
 
 -dM[<byte>] : forces memory poisoning, which means that each and every
-memory region allocated with malloc() or pool_alloc2() will be filled with
+memory region allocated with malloc() or pool_alloc() will be filled with
 <byte> before being passed to the caller. When <byte> is not specified, it
 defaults to 0x50 ('P'). While this slightly slows down operations, it is
 useful to reliably trigger issues resulting from missing initializations in
@@ -48,7 +48,7 @@ struct buffer_wait {
 struct list list; /* Next element in the <buffer_wq> list */
 };
 
-extern struct pool_head *pool2_buffer;
+extern struct pool_head *pool_head_buffer;
 extern struct buffer buf_empty;
 extern struct buffer buf_wanted;
 extern struct list buffer_wq;
@@ -665,9 +665,9 @@ static inline struct buffer *b_alloc(struct buffer **buf)
 struct buffer *b;
 
 *buf = &buf_wanted;
-b = pool_alloc_dirty(pool2_buffer);
+b = pool_alloc_dirty(pool_head_buffer);
 if (likely(b)) {
-b->size = pool2_buffer->size - sizeof(struct buffer);
+b->size = pool_head_buffer->size - sizeof(struct buffer);
 b_reset(b);
 *buf = b;
 }
@@ -686,9 +686,9 @@ static inline struct buffer *b_alloc_fast(struct buffer **buf)
 struct buffer *b;
 
 *buf = &buf_wanted;
-b = pool_get_first(pool2_buffer);
+b = pool_get_first(pool_head_buffer);
 if (likely(b)) {
-b->size = pool2_buffer->size - sizeof(struct buffer);
+b->size = pool_head_buffer->size - sizeof(struct buffer);
 b_reset(b);
 *buf = b;
 }
@@ -698,7 +698,7 @@ static inline struct buffer *b_alloc_fast(struct buffer **buf)
 /* Releases buffer *buf (no check of emptiness) */
 static inline void __b_drop(struct buffer **buf)
 {
-pool_free2(pool2_buffer, *buf);
+pool_free(pool_head_buffer, *buf);
 }
 
 /* Releases buffer *buf if allocated. */
@@ -735,14 +735,14 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
 return *buf;
 
 *buf = &buf_wanted;
-HA_SPIN_LOCK(POOL_LOCK, &pool2_buffer->lock);
+HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
 
 /* fast path */
-if ((pool2_buffer->allocated - pool2_buffer->used) > margin) {
-b = __pool_get_first(pool2_buffer);
+if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
+b = __pool_get_first(pool_head_buffer);
 if (likely(b)) {
-HA_SPIN_UNLOCK(POOL_LOCK, &pool2_buffer->lock);
-b->size = pool2_buffer->size - sizeof(struct buffer);
+HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
+b->size = pool_head_buffer->size - sizeof(struct buffer);
 b_reset(b);
 *buf = b;
 return b;
@@ -750,12 +750,12 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
 }
 
 /* slow path, uses malloc() */
-b = __pool_refill_alloc(pool2_buffer, margin);
+b = __pool_refill_alloc(pool_head_buffer, margin);
 
-HA_SPIN_UNLOCK(POOL_LOCK, &pool2_buffer->lock);
+HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
 
 if (b) {
-b->size = pool2_buffer->size - sizeof(struct buffer);
+b->size = pool_head_buffer->size - sizeof(struct buffer);
 b_reset(b);
 *buf = b;
 }
@@ -36,7 +36,7 @@ struct chunk {
 int len; /* current size of the string from first to last char. <0 = uninit. */
 };
 
-struct pool_head *pool2_trash;
+struct pool_head *pool_head_trash;
 
 /* function prototypes */
 
@@ -60,7 +60,7 @@ void deinit_trash_buffers(void);
 */
 static inline void free_trash_chunk(struct chunk *chunk)
 {
-pool_free2(pool2_trash, chunk);
+pool_free(pool_head_trash, chunk);
 }
 
 
@@ -93,23 +93,23 @@ unsigned long pool_total_used();
 /*
 * This function frees whatever can be freed in pool <pool>.
 */
-void pool_flush2(struct pool_head *pool);
+void pool_flush(struct pool_head *pool);
 
 /*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners.
 *
-* <pool_ctx> is used when pool_gc2 is called to release resources to allocate
+* <pool_ctx> is used when pool_gc is called to release resources to allocate
 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
 * already locked, so we need to skip the lock here.
 */
-void pool_gc2(struct pool_head *pool_ctx);
+void pool_gc(struct pool_head *pool_ctx);
 
 /*
 * This function destroys a pull by freeing it completely.
 * This should be called only under extreme circumstances.
 */
-void *pool_destroy2(struct pool_head *pool);
+void *pool_destroy(struct pool_head *pool);
 
 /*
 * Returns a pointer to type <type> taken from the pool <pool_type> if
@@ -209,7 +209,7 @@ static inline void pool_free_area(void *area, size_t size)
 * dynamically allocated. In the first case, <pool_type> is updated to point to
 * the next element in the list. Memory poisonning is performed if enabled.
 */
-static inline void *pool_alloc2(struct pool_head *pool)
+static inline void *pool_alloc(struct pool_head *pool)
 {
 void *p;
 
@@ -238,7 +238,7 @@ static inline void *pool_alloc2(struct pool_head *pool)
 * pointer. Just like with the libc's free(), nothing
 * is done if <ptr> is NULL.
 */
-static inline void pool_free2(struct pool_head *pool, void *ptr)
+static inline void pool_free(struct pool_head *pool, void *ptr)
 {
 if (likely(ptr != NULL)) {
 HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
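For reference, a minimal sketch (again not part of the patch) of how the renamed maintenance entry points declared above chain together on shutdown or under memory pressure; pool_head_example is the same placeholder pool as in the sketch near the top.

/* --- illustrative sketch, not part of the commit --- */
#include <common/memory.h>

extern struct pool_head *pool_head_example;   /* placeholder pool */

static void release_example_pool(void)
{
	/* was pool_flush2(): release the free objects held by this pool */
	pool_flush(pool_head_example);

	/* was pool_gc2(): free whatever can be freed in all pools while
	 * respecting their minima; NULL means no pool is locked by the caller */
	pool_gc(NULL);

	/* was pool_destroy2(): returns the pool if entries are still in use,
	 * otherwise destroys it completely */
	if (pool_destroy(pool_head_example) == NULL)
		pool_head_example = NULL;
}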
@@ -55,14 +55,14 @@ static inline void appctx_init(struct appctx *appctx, unsigned long thread_mask)
 
 /* Tries to allocate a new appctx and initialize its main fields. The appctx
 * is returned on success, NULL on failure. The appctx must be released using
-* pool_free2(connection) or appctx_free(), since it's allocated from the
+* pool_free(connection) or appctx_free(), since it's allocated from the
 * connection pool. <applet> is assigned as the applet, but it can be NULL.
 */
 static inline struct appctx *appctx_new(struct applet *applet, unsigned long thread_mask)
 {
 struct appctx *appctx;
 
-appctx = pool_alloc2(pool2_connection);
+appctx = pool_alloc(pool_head_connection);
 if (likely(appctx != NULL)) {
 appctx->obj_type = OBJ_TYPE_APPCTX;
 appctx->applet = applet;
@@ -93,7 +93,7 @@ static inline void __appctx_free(struct appctx *appctx)
 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 }
 
-pool_free2(pool2_connection, appctx);
+pool_free(pool_head_connection, appctx);
 HA_ATOMIC_SUB(&nb_applets, 1);
 }
 static inline void appctx_free(struct appctx *appctx)
@@ -30,8 +30,8 @@
 #include <proto/fd.h>
 #include <proto/obj_type.h>
 
-extern struct pool_head *pool2_connection;
-extern struct pool_head *pool2_connstream;
+extern struct pool_head *pool_head_connection;
+extern struct pool_head *pool_head_connstream;
 extern struct xprt_ops *registered_xprt[XPRT_ENTRIES];
 extern struct alpn_mux_list alpn_mux_list;
 
@@ -642,13 +642,13 @@ static inline void conn_clear_xprt_done_cb(struct connection *conn)
 
 /* Tries to allocate a new connection and initialized its main fields. The
 * connection is returned on success, NULL on failure. The connection must
-* be released using pool_free2() or conn_free().
+* be released using pool_free() or conn_free().
 */
 static inline struct connection *conn_new()
 {
 struct connection *conn;
 
-conn = pool_alloc2(pool2_connection);
+conn = pool_alloc(pool_head_connection);
 if (likely(conn != NULL))
 conn_init(conn);
 return conn;
@@ -657,13 +657,13 @@ static inline struct connection *conn_new()
 /* Releases a conn_stream previously allocated by cs_new() */
 static inline void cs_free(struct conn_stream *cs)
 {
-pool_free2(pool2_connstream, cs);
+pool_free(pool_head_connstream, cs);
 }
 
 /* Tries to allocate a new conn_stream and initialize its main fields. If
 * <conn> is NULL, then a new connection is allocated on the fly, initialized,
 * and assigned to cs->conn ; this connection will then have to be released
-* using pool_free2() or conn_free(). The conn_stream is initialized and added
+* using pool_free() or conn_free(). The conn_stream is initialized and added
 * to the mux's stream list on success, then returned. On failure, nothing is
 * allocated and NULL is returned.
 */
@@ -671,7 +671,7 @@ static inline struct conn_stream *cs_new(struct connection *conn)
 {
 struct conn_stream *cs;
 
-cs = pool_alloc2(pool2_connstream);
+cs = pool_alloc(pool_head_connstream);
 if (!likely(cs))
 return NULL;
 
@@ -691,7 +691,7 @@ static inline struct conn_stream *cs_new(struct connection *conn)
 /* Releases a connection previously allocated by conn_new() */
 static inline void conn_free(struct connection *conn)
 {
-pool_free2(pool2_connection, conn);
+pool_free(pool_head_connection, conn);
 }
 
 /* Release a conn_stream, and kill the connection if it was the last one */
@@ -93,7 +93,7 @@
 FLT_STRM_DATA_CB_IMPL_2(strm, chn, call, ##__VA_ARGS__), \
 FLT_STRM_DATA_CB_IMPL_1(strm, chn, call, ##__VA_ARGS__))
 
-extern struct pool_head *pool2_filter;
+extern struct pool_head *pool_head_filter;
 
 void flt_deinit(struct proxy *p);
 int flt_check(struct proxy *p);
@@ -25,7 +25,7 @@
 #include <common/config.h>
 #include <types/hdr_idx.h>
 
-extern struct pool_head *pool2_hdr_idx;
+extern struct pool_head *pool_head_hdr_idx;
 
 /*
 * Initialize the list pointers.
@@ -34,8 +34,8 @@
 #include <types/proxy.h>
 #include <types/stream.h>
 
-extern struct pool_head *pool2_requri;
-extern struct pool_head *pool2_uniqueid;
+extern struct pool_head *pool_head_requri;
+extern struct pool_head *pool_head_uniqueid;
 
 extern char *log_format;
 extern char default_tcp_log_format[];
@@ -34,7 +34,7 @@
 
 #include <proto/backend.h>
 
-extern struct pool_head *pool2_pendconn;
+extern struct pool_head *pool_head_pendconn;
 
 int init_pendconn();
 struct pendconn *pendconn_add(struct stream *strm);
@@ -32,7 +32,7 @@
 
 #include <proto/stick_table.h>
 
-extern struct pool_head *pool2_session;
+extern struct pool_head *pool_head_session;
 struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type *origin);
 void session_free(struct session *sess);
 int init_session();
@@ -20,7 +20,7 @@
 
 extern int signal_queue_len;
 extern struct signal_descriptor signal_state[];
-extern struct pool_head *pool2_sig_handlers;
+extern struct pool_head *pool_head_sig_handlers;
 
 __decl_hathreads(extern HA_SPINLOCK_T signals_lock);
 
@@ -30,7 +30,7 @@
 #include <proto/stick_table.h>
 #include <proto/task.h>
 
-extern struct pool_head *pool2_stream;
+extern struct pool_head *pool_head_stream;
 extern struct list streams;
 
 extern struct data_cb sess_conn_cb;
@@ -88,8 +88,8 @@ extern unsigned int tasks_run_queue; /* run queue size */
 extern unsigned int tasks_run_queue_cur;
 extern unsigned int nb_tasks_cur;
 extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
-extern struct pool_head *pool2_task;
-extern struct pool_head *pool2_notification;
+extern struct pool_head *pool_head_task;
+extern struct pool_head *pool_head_notification;
 
 __decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
 __decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
@@ -218,7 +218,7 @@ static inline struct task *task_init(struct task *t, unsigned long thread_mask)
 */
 static inline struct task *task_new(unsigned long thread_mask)
 {
-struct task *t = pool_alloc2(pool2_task);
+struct task *t = pool_alloc(pool_head_task);
 if (t) {
 HA_ATOMIC_ADD(&nb_tasks, 1);
 task_init(t, thread_mask);
@@ -232,9 +232,9 @@ static inline struct task *task_new(unsigned long thread_mask)
 */
 static inline void task_free(struct task *t)
 {
-pool_free2(pool2_task, t);
+pool_free(pool_head_task, t);
 if (unlikely(stopping))
-pool_flush2(pool2_task);
+pool_flush(pool_head_task);
 HA_ATOMIC_SUB(&nb_tasks, 1);
 }
 
@@ -291,7 +291,7 @@ static inline void task_schedule(struct task *task, int when)
 */
 static inline struct notification *notification_new(struct list *purge, struct list *event, struct task *wakeup)
 {
-struct notification *com = pool_alloc2(pool2_notification);
+struct notification *com = pool_alloc(pool_head_notification);
 if (!com)
 return NULL;
 LIST_ADDQ(purge, &com->purge_me);
@@ -315,7 +315,7 @@ static inline void notification_purge(struct list *purge)
 LIST_DEL(&com->purge_me);
 if (!com->task) {
 HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
-pool_free2(pool2_notification, com);
+pool_free(pool_head_notification, com);
 continue;
 }
 com->task = NULL;
@@ -337,7 +337,7 @@ static inline void notification_wake(struct list *wake)
 LIST_DEL(&com->wake_me);
 if (!com->task) {
 HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
-pool_free2(pool2_notification, com);
+pool_free(pool_head_notification, com);
 continue;
 }
 task_wakeup(com->task, TASK_WOKEN_MSG);
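The task and notification helpers above are thin wrappers around the renamed pool calls (task_new() draws from pool_head_task, task_free() returns to it). A hedged usage sketch, with a placeholder callback and context:

/* --- illustrative sketch, not part of the commit --- */
#include <common/hathreads.h>   /* tid_bit */
#include <proto/task.h>

static struct task *my_job(struct task *t)
{
	/* placeholder callback: do the work, possibly re-arm the task */
	return t;
}

static struct task *start_my_job(void *ctx)
{
	struct task *t = task_new(tid_bit);   /* allocates from pool_head_task */

	if (!t)
		return NULL;
	t->process = my_job;
	t->context = ctx;
	task_wakeup(t, TASK_WOKEN_INIT);
	return t;
}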
@@ -34,7 +34,7 @@ struct cap_hdr {
 struct pool_head *pool; /* pool of pre-allocated memory area of (len+1) bytes */
 };
 
-extern struct pool_head *pool2_capture;
+extern struct pool_head *pool_head_capture;
 
 #endif /* _TYPES_CAPTURE_H */
 
@@ -357,7 +357,7 @@ extern struct action_kw_list http_res_keywords;
 
 extern const struct http_method_name http_known_methods[HTTP_METH_OTHER];
 
-extern struct pool_head *pool2_http_txn;
+extern struct pool_head *pool_head_http_txn;
 
 #endif /* _TYPES_PROTO_HTTP_H */
 
src/buffer.c (18 changed lines)
@@ -20,7 +20,7 @@
 
 #include <types/global.h>
 
-struct pool_head *pool2_buffer;
+struct pool_head *pool_head_buffer;
 
 /* These buffers are used to always have a valid pointer to an empty buffer in
 * channels. The first buffer is set once a buffer is empty. The second one is
@@ -58,8 +58,8 @@ int init_buffer()
 {
 void *buffer;
 
-pool2_buffer = create_pool("buffer", sizeof (struct buffer) + global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
-if (!pool2_buffer)
+pool_head_buffer = create_pool("buffer", sizeof (struct buffer) + global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
+if (!pool_head_buffer)
 return 0;
 
 /* The reserved buffer is what we leave behind us. Thus we always need
@@ -69,17 +69,17 @@ int init_buffer()
 * (2 for current session, one for next session that might be needed to
 * release a server connection).
 */
-pool2_buffer->minavail = MAX(global.tune.reserved_bufs, 3);
+pool_head_buffer->minavail = MAX(global.tune.reserved_bufs, 3);
 if (global.tune.buf_limit)
-pool2_buffer->limit = global.tune.buf_limit;
+pool_head_buffer->limit = global.tune.buf_limit;
 
 HA_SPIN_INIT(&buffer_wq_lock);
 
-buffer = pool_refill_alloc(pool2_buffer, pool2_buffer->minavail - 1);
+buffer = pool_refill_alloc(pool_head_buffer, pool_head_buffer->minavail - 1);
 if (!buffer)
 return 0;
 
-pool_free2(pool2_buffer, buffer);
+pool_free(pool_head_buffer, buffer);
 
 hap_register_per_thread_init(init_buffer_per_thread);
 hap_register_per_thread_deinit(deinit_buffer_per_thread);
@@ -88,7 +88,7 @@ int init_buffer()
 
 void deinit_buffer()
 {
-pool_destroy2(pool2_buffer);
+pool_destroy(pool_head_buffer);
 }
 
 /* This function writes the string <str> at position <pos> which must be in
@@ -261,7 +261,7 @@ void __offer_buffer(void *from, unsigned int threshold)
 * allocated, and in any case at least one task per two reserved
 * buffers.
 */
-avail = pool2_buffer->allocated - pool2_buffer->used - global.tune.reserved_bufs / 2;
+avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
 
 list_for_each_entry_safe(wait, bak, &buffer_wq, list) {
 if (avail <= threshold)
src/cache.c (14 changed lines)
@ -38,7 +38,7 @@
|
||||
|
||||
static const char *cache_store_flt_id = "cache store filter";
|
||||
|
||||
static struct pool_head *pool2_cache_st = NULL;
|
||||
static struct pool_head *pool_head_cache_st = NULL;
|
||||
|
||||
struct applet http_cache_applet;
|
||||
|
||||
@ -125,7 +125,7 @@ cache_store_chn_start_analyze(struct stream *s, struct filter *filter, struct ch
|
||||
if (filter->ctx == NULL) {
|
||||
struct cache_st *st;
|
||||
|
||||
st = pool_alloc_dirty(pool2_cache_st);
|
||||
st = pool_alloc_dirty(pool_head_cache_st);
|
||||
if (st == NULL)
|
||||
return -1;
|
||||
|
||||
@ -160,7 +160,7 @@ cache_store_chn_end_analyze(struct stream *s, struct filter *filter, struct chan
|
||||
|
||||
}
|
||||
if (st) {
|
||||
pool_free2(pool2_cache_st, st);
|
||||
pool_free(pool_head_cache_st, st);
|
||||
filter->ctx = NULL;
|
||||
}
|
||||
|
||||
@ -218,7 +218,7 @@ cache_store_http_forward_data(struct stream *s, struct filter *filter,
|
||||
shctx_row_dec_hot(shctx, st->first_block);
|
||||
object->eb.key = 0;
|
||||
shctx_unlock(shctx);
|
||||
pool_free2(pool2_cache_st, st);
|
||||
pool_free(pool_head_cache_st, st);
|
||||
} else {
|
||||
/* Skip remaining headers to fill the cache */
|
||||
b_adv(msg->chn->buf, st->hdrs_len);
|
||||
@ -271,7 +271,7 @@ cache_store_http_end(struct stream *s, struct filter *filter,
|
||||
|
||||
}
|
||||
if (st) {
|
||||
pool_free2(pool2_cache_st, st);
|
||||
pool_free(pool_head_cache_st, st);
|
||||
filter->ctx = NULL;
|
||||
}
|
||||
|
||||
@ -488,7 +488,7 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
|
||||
shctx_unlock(shctx);
|
||||
if (filter->ctx) {
|
||||
object->eb.key = 0;
|
||||
pool_free2(pool2_cache_st, filter->ctx);
|
||||
pool_free(pool_head_cache_st, filter->ctx);
|
||||
filter->ctx = NULL;
|
||||
}
|
||||
goto out;
|
||||
@ -1031,6 +1031,6 @@ static void __cache_init(void)
|
||||
cli_register_kw(&cli_kws);
|
||||
http_res_keywords_register(&http_res_actions);
|
||||
http_req_keywords_register(&http_req_actions);
|
||||
pool2_cache_st = create_pool("cache_st", sizeof(struct cache_st), MEM_F_SHARED);
|
||||
pool_head_cache_st = create_pool("cache_st", sizeof(struct cache_st), MEM_F_SHARED);
|
||||
}
|
||||
|
||||
|
@ -7535,9 +7535,9 @@ int check_config_validity()
|
||||
if (!global.tune.requri_len)
|
||||
global.tune.requri_len = REQURI_LEN;
|
||||
|
||||
pool2_requri = create_pool("requri", global.tune.requri_len , MEM_F_SHARED);
|
||||
pool_head_requri = create_pool("requri", global.tune.requri_len , MEM_F_SHARED);
|
||||
|
||||
pool2_capture = create_pool("capture", global.tune.cookie_len, MEM_F_SHARED);
|
||||
pool_head_capture = create_pool("capture", global.tune.cookie_len, MEM_F_SHARED);
|
||||
|
||||
/* Post initialisation of the users and groups lists. */
|
||||
err_code = userlist_postinit();
|
||||
@ -9153,7 +9153,7 @@ out_uri_auth_compat:
|
||||
curproxy->server_state_file_name = strdup(curproxy->id);
|
||||
}
|
||||
|
||||
pool2_hdr_idx = create_pool("hdr_idx",
|
||||
pool_head_hdr_idx = create_pool("hdr_idx",
|
||||
global.tune.max_http_hdr * sizeof(struct hdr_idx_elem),
|
||||
MEM_F_SHARED);
|
||||
|
||||
|
src/checks.c (36 changed lines)
@ -70,8 +70,8 @@ static int tcpcheck_get_step_id(struct check *);
|
||||
static char * tcpcheck_get_step_comment(struct check *, int);
|
||||
static int tcpcheck_main(struct check *);
|
||||
|
||||
static struct pool_head *pool2_email_alert = NULL;
|
||||
static struct pool_head *pool2_tcpcheck_rule = NULL;
|
||||
static struct pool_head *pool_head_email_alert = NULL;
|
||||
static struct pool_head *pool_head_tcpcheck_rule = NULL;
|
||||
|
||||
|
||||
static const struct check_status check_statuses[HCHK_STATUS_SIZE] = {
|
||||
@ -1612,7 +1612,7 @@ static int connect_conn_chk(struct task *t)
|
||||
}
|
||||
|
||||
static struct list pid_list = LIST_HEAD_INIT(pid_list);
|
||||
static struct pool_head *pool2_pid_list;
|
||||
static struct pool_head *pool_head_pid_list;
|
||||
__decl_hathreads(HA_SPINLOCK_T pid_list_lock);
|
||||
|
||||
void block_sigchld(void)
|
||||
@ -1636,7 +1636,7 @@ static struct pid_list *pid_list_add(pid_t pid, struct task *t)
|
||||
struct pid_list *elem;
|
||||
struct check *check = t->context;
|
||||
|
||||
elem = pool_alloc2(pool2_pid_list);
|
||||
elem = pool_alloc(pool_head_pid_list);
|
||||
if (!elem)
|
||||
return NULL;
|
||||
elem->pid = pid;
|
||||
@ -1668,7 +1668,7 @@ static void pid_list_del(struct pid_list *elem)
|
||||
|
||||
check = elem->t->context;
|
||||
check->curpid = NULL;
|
||||
pool_free2(pool2_pid_list, elem);
|
||||
pool_free(pool_head_pid_list, elem);
|
||||
}
|
||||
|
||||
/* Called from inside SIGCHLD handler, SIGCHLD is blocked */
|
||||
@ -1700,7 +1700,7 @@ static void sigchld_handler(struct sig_handler *sh)
|
||||
|
||||
static int init_pid_list(void)
|
||||
{
|
||||
if (pool2_pid_list != NULL)
|
||||
if (pool_head_pid_list != NULL)
|
||||
/* Nothing to do */
|
||||
return 0;
|
||||
|
||||
@ -1710,8 +1710,8 @@ static int init_pid_list(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
pool2_pid_list = create_pool("pid_list", sizeof(struct pid_list), MEM_F_SHARED);
|
||||
if (pool2_pid_list == NULL) {
|
||||
pool_head_pid_list = create_pool("pid_list", sizeof(struct pid_list), MEM_F_SHARED);
|
||||
if (pool_head_pid_list == NULL) {
|
||||
ha_alert("Failed to allocate memory pool for external health checks: %s. Aborting.\n",
|
||||
strerror(errno));
|
||||
return 1;
|
||||
@ -3122,9 +3122,9 @@ void email_alert_free(struct email_alert *alert)
|
||||
free(rule->string);
|
||||
if (rule->expect_regex)
|
||||
regex_free(rule->expect_regex);
|
||||
pool_free2(pool2_tcpcheck_rule, rule);
|
||||
pool_free(pool_head_tcpcheck_rule, rule);
|
||||
}
|
||||
pool_free2(pool2_email_alert, alert);
|
||||
pool_free(pool_head_email_alert, alert);
|
||||
}
|
||||
|
||||
static struct task *process_email_alert(struct task *t)
|
||||
@ -3250,7 +3250,7 @@ static int add_tcpcheck_expect_str(struct list *list, const char *str)
|
||||
{
|
||||
struct tcpcheck_rule *tcpcheck;
|
||||
|
||||
if ((tcpcheck = pool_alloc2(pool2_tcpcheck_rule)) == NULL)
|
||||
if ((tcpcheck = pool_alloc(pool_head_tcpcheck_rule)) == NULL)
|
||||
return 0;
|
||||
memset(tcpcheck, 0, sizeof(*tcpcheck));
|
||||
tcpcheck->action = TCPCHK_ACT_EXPECT;
|
||||
@ -3258,7 +3258,7 @@ static int add_tcpcheck_expect_str(struct list *list, const char *str)
|
||||
tcpcheck->expect_regex = NULL;
|
||||
tcpcheck->comment = NULL;
|
||||
if (!tcpcheck->string) {
|
||||
pool_free2(pool2_tcpcheck_rule, tcpcheck);
|
||||
pool_free(pool_head_tcpcheck_rule, tcpcheck);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3273,7 +3273,7 @@ static int add_tcpcheck_send_strs(struct list *list, const char * const *strs)
|
||||
char *dst;
|
||||
int i;
|
||||
|
||||
if ((tcpcheck = pool_alloc2(pool2_tcpcheck_rule)) == NULL)
|
||||
if ((tcpcheck = pool_alloc(pool_head_tcpcheck_rule)) == NULL)
|
||||
return 0;
|
||||
memset(tcpcheck, 0, sizeof(*tcpcheck));
|
||||
tcpcheck->action = TCPCHK_ACT_SEND;
|
||||
@ -3285,7 +3285,7 @@ static int add_tcpcheck_send_strs(struct list *list, const char * const *strs)
|
||||
|
||||
tcpcheck->string = malloc(tcpcheck->string_len + 1);
|
||||
if (!tcpcheck->string) {
|
||||
pool_free2(pool2_tcpcheck_rule, tcpcheck);
|
||||
pool_free(pool_head_tcpcheck_rule, tcpcheck);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3305,13 +3305,13 @@ static int enqueue_one_email_alert(struct proxy *p, struct server *s,
|
||||
struct tcpcheck_rule *tcpcheck;
|
||||
struct check *check = &q->check;
|
||||
|
||||
if ((alert = pool_alloc2(pool2_email_alert)) == NULL)
|
||||
if ((alert = pool_alloc(pool_head_email_alert)) == NULL)
|
||||
goto error;
|
||||
LIST_INIT(&alert->list);
|
||||
LIST_INIT(&alert->tcpcheck_rules);
|
||||
alert->srv = s;
|
||||
|
||||
if ((tcpcheck = pool_alloc2(pool2_tcpcheck_rule)) == NULL)
|
||||
if ((tcpcheck = pool_alloc(pool_head_tcpcheck_rule)) == NULL)
|
||||
goto error;
|
||||
memset(tcpcheck, 0, sizeof(*tcpcheck));
|
||||
tcpcheck->action = TCPCHK_ACT_CONNECT;
|
||||
@ -3499,8 +3499,8 @@ static void __check_init(void)
|
||||
{
|
||||
hap_register_post_check(start_checks);
|
||||
|
||||
pool2_email_alert = create_pool("email_alert", sizeof(struct email_alert), MEM_F_SHARED);
|
||||
pool2_tcpcheck_rule = create_pool("tcpcheck_rule", sizeof(struct tcpcheck_rule), MEM_F_SHARED);
|
||||
pool_head_email_alert = create_pool("email_alert", sizeof(struct email_alert), MEM_F_SHARED);
|
||||
pool_head_tcpcheck_rule = create_pool("tcpcheck_rule", sizeof(struct tcpcheck_rule), MEM_F_SHARED);
|
||||
}
|
||||
|
||||
|
||||
|
src/chunk.c (14 changed lines)
@@ -32,7 +32,7 @@ static THREAD_LOCAL char *trash_buf1;
 static THREAD_LOCAL char *trash_buf2;
 
 /* the trash pool for reentrant allocations */
-struct pool_head *pool2_trash = NULL;
+struct pool_head *pool_head_trash = NULL;
 
 /* this is used to drain data, and as a temporary buffer for sprintf()... */
 THREAD_LOCAL struct chunk trash = { .str = NULL };
@@ -96,9 +96,9 @@ int init_trash_buffers(int first)
 hap_register_per_thread_init(init_trash_buffers_per_thread);
 hap_register_per_thread_deinit(deinit_trash_buffers_per_thread);
 }
-pool_destroy2(pool2_trash);
-pool2_trash = create_pool("trash", sizeof(struct chunk) + global.tune.bufsize, MEM_F_EXACT);
-if (!pool2_trash || !alloc_trash_buffers(global.tune.bufsize))
+pool_destroy(pool_head_trash);
+pool_head_trash = create_pool("trash", sizeof(struct chunk) + global.tune.bufsize, MEM_F_EXACT);
+if (!pool_head_trash || !alloc_trash_buffers(global.tune.bufsize))
 return 0;
 return 1;
 }
@@ -108,7 +108,7 @@ int init_trash_buffers(int first)
 */
 void deinit_trash_buffers(void)
 {
-pool_destroy2(pool2_trash);
+pool_destroy(pool_head_trash);
 }
 
 /*
@@ -121,11 +121,11 @@ struct chunk *alloc_trash_chunk(void)
 {
 struct chunk *chunk;
 
-chunk = pool_alloc2(pool2_trash);
+chunk = pool_alloc(pool_head_trash);
 if (chunk) {
 char *buf = (char *)chunk + sizeof(struct chunk);
 *buf = 0;
-chunk_init(chunk, buf, pool2_trash->size - sizeof(struct chunk));
+chunk_init(chunk, buf, pool_head_trash->size - sizeof(struct chunk));
 }
 return chunk;
 }
@ -166,7 +166,7 @@ static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
|
||||
HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
|
||||
}
|
||||
|
||||
*comp_ctx = pool_alloc2(pool_comp_ctx);
|
||||
*comp_ctx = pool_alloc(pool_comp_ctx);
|
||||
if (*comp_ctx == NULL)
|
||||
return -1;
|
||||
#if defined(USE_SLZ)
|
||||
@ -192,7 +192,7 @@ static inline int deinit_comp_ctx(struct comp_ctx **comp_ctx)
|
||||
if (!*comp_ctx)
|
||||
return 0;
|
||||
|
||||
pool_free2(pool_comp_ctx, *comp_ctx);
|
||||
pool_free(pool_comp_ctx, *comp_ctx);
|
||||
*comp_ctx = NULL;
|
||||
|
||||
#ifdef USE_ZLIB
|
||||
@ -418,7 +418,7 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
|
||||
HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
|
||||
}
|
||||
pool = zlib_pool_deflate_state;
|
||||
ctx->zlib_deflate_state = buf = pool_alloc2(pool);
|
||||
ctx->zlib_deflate_state = buf = pool_alloc(pool);
|
||||
break;
|
||||
|
||||
case 1:
|
||||
@ -429,7 +429,7 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
|
||||
HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
|
||||
}
|
||||
pool = zlib_pool_window;
|
||||
ctx->zlib_window = buf = pool_alloc2(pool);
|
||||
ctx->zlib_window = buf = pool_alloc(pool);
|
||||
break;
|
||||
|
||||
case 2:
|
||||
@ -440,7 +440,7 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
|
||||
HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
|
||||
}
|
||||
pool = zlib_pool_prev;
|
||||
ctx->zlib_prev = buf = pool_alloc2(pool);
|
||||
ctx->zlib_prev = buf = pool_alloc(pool);
|
||||
break;
|
||||
|
||||
case 3:
|
||||
@ -451,7 +451,7 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
|
||||
HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
|
||||
}
|
||||
pool = zlib_pool_head;
|
||||
ctx->zlib_head = buf = pool_alloc2(pool);
|
||||
ctx->zlib_head = buf = pool_alloc(pool);
|
||||
break;
|
||||
|
||||
case 4:
|
||||
@ -462,7 +462,7 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
|
||||
HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
|
||||
}
|
||||
pool = zlib_pool_pending_buf;
|
||||
ctx->zlib_pending_buf = buf = pool_alloc2(pool);
|
||||
ctx->zlib_pending_buf = buf = pool_alloc(pool);
|
||||
break;
|
||||
}
|
||||
if (buf != NULL)
|
||||
@ -496,7 +496,7 @@ static void free_zlib(void *opaque, void *ptr)
|
||||
else if (ptr == ctx->zlib_pending_buf)
|
||||
pool = zlib_pool_pending_buf;
|
||||
|
||||
pool_free2(pool, ptr);
|
||||
pool_free(pool, ptr);
|
||||
HA_ATOMIC_SUB(&zlib_used_memory, pool->size);
|
||||
}
|
||||
|
||||
|
@@ -27,8 +27,8 @@
 #include <proto/ssl_sock.h>
 #endif
 
-struct pool_head *pool2_connection;
-struct pool_head *pool2_connstream;
+struct pool_head *pool_head_connection;
+struct pool_head *pool_head_connstream;
 struct xprt_ops *registered_xprt[XPRT_ENTRIES] = { NULL, };
 
 /* List head of all known muxes for ALPN */
@@ -39,18 +39,18 @@ struct alpn_mux_list alpn_mux_list = {
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
 int init_connection()
 {
-pool2_connection = create_pool("connection", sizeof (struct connection), MEM_F_SHARED);
-if (!pool2_connection)
+pool_head_connection = create_pool("connection", sizeof (struct connection), MEM_F_SHARED);
+if (!pool_head_connection)
 goto fail_conn;
 
-pool2_connstream = create_pool("conn_stream", sizeof(struct conn_stream), MEM_F_SHARED);
-if (!pool2_connstream)
+pool_head_connstream = create_pool("conn_stream", sizeof(struct conn_stream), MEM_F_SHARED);
+if (!pool_head_connstream)
 goto fail_cs;
 
 return 1;
 fail_cs:
-pool_destroy2(pool2_connection);
-pool2_connection = NULL;
+pool_destroy(pool_head_connection);
+pool_head_connection = NULL;
 fail_conn:
 return 0;
 }
src/dns.c (40 changed lines)
@ -504,7 +504,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
|
||||
|
||||
rm_obselete_item:
|
||||
LIST_DEL(&item->list);
|
||||
pool_free2(dns_answer_item_pool, item);
|
||||
pool_free(dns_answer_item_pool, item);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -713,7 +713,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
if (reader >= bufend)
|
||||
return DNS_RESP_INVALID;
|
||||
|
||||
dns_answer_record = pool_alloc2(dns_answer_item_pool);
|
||||
dns_answer_record = pool_alloc(dns_answer_item_pool);
|
||||
if (dns_answer_record == NULL)
|
||||
return (DNS_RESP_INVALID);
|
||||
|
||||
@ -721,14 +721,14 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
len = dns_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset);
|
||||
|
||||
if (len == 0) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
|
||||
/* Check if the current record dname is valid. previous_dname
|
||||
* points either to queried dname or last CNAME target */
|
||||
if (dns_query->type != DNS_RTYPE_SRV && memcmp(previous_dname, tmpname, len) != 0) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
if (i == 0) {
|
||||
/* First record, means a mismatch issue between
|
||||
* queried dname and dname found in the first
|
||||
@ -748,13 +748,13 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
|
||||
reader += offset;
|
||||
if (reader >= bufend) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
|
||||
/* 2 bytes for record type (A, AAAA, CNAME, etc...) */
|
||||
if (reader + 2 > bufend) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->type = reader[0] * 256 + reader[1];
|
||||
@ -762,7 +762,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
|
||||
/* 2 bytes for class (2) */
|
||||
if (reader + 2 > bufend) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->class = reader[0] * 256 + reader[1];
|
||||
@ -770,7 +770,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
|
||||
/* 4 bytes for ttl (4) */
|
||||
if (reader + 4 > bufend) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->ttl = reader[0] * 16777216 + reader[1] * 65536
|
||||
@ -779,7 +779,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
|
||||
/* Now reading data len */
|
||||
if (reader + 2 > bufend) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->data_len = reader[0] * 256 + reader[1];
|
||||
@ -792,7 +792,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
case DNS_RTYPE_A:
|
||||
/* ipv4 is stored on 4 bytes */
|
||||
if (dns_answer_record->data_len != 4) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->address.sa_family = AF_INET;
|
||||
@ -810,14 +810,14 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
* starts at 1.
|
||||
*/
|
||||
if (i + 1 == dns_p->header.ancount) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_CNAME_ERROR;
|
||||
}
|
||||
|
||||
offset = 0;
|
||||
len = dns_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset);
|
||||
if (len == 0) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
|
||||
@ -835,7 +835,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
* - the target hostname
|
||||
*/
|
||||
if (dns_answer_record->data_len <= 6) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->priority = read_n16(reader);
|
||||
@ -847,7 +847,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
offset = 0;
|
||||
len = dns_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset);
|
||||
if (len == 0) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->data_len = len;
|
||||
@ -858,7 +858,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
case DNS_RTYPE_AAAA:
|
||||
/* ipv6 is stored on 16 bytes */
|
||||
if (dns_answer_record->data_len != 16) {
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
return DNS_RESP_INVALID;
|
||||
}
|
||||
dns_answer_record->address.sa_family = AF_INET6;
|
||||
@ -918,7 +918,7 @@ static int dns_validate_dns_response(unsigned char *resp, unsigned char *bufend,
|
||||
|
||||
if (found == 1) {
|
||||
tmp_record->last_seen = now.tv_sec;
|
||||
pool_free2(dns_answer_item_pool, dns_answer_record);
|
||||
pool_free(dns_answer_item_pool, dns_answer_record);
|
||||
}
|
||||
else {
|
||||
dns_answer_record->last_seen = now.tv_sec;
|
||||
@ -1262,7 +1262,7 @@ static struct dns_resolution *dns_pick_resolution(struct dns_resolvers *resolver
|
||||
|
||||
from_pool:
|
||||
/* No resolution could be found, so let's allocate a new one */
|
||||
res = pool_alloc2(dns_resolution_pool);
|
||||
res = pool_alloc(dns_resolution_pool);
|
||||
if (res) {
|
||||
memset(res, 0, sizeof(*res));
|
||||
res->resolvers = resolvers;
|
||||
@ -1303,7 +1303,7 @@ static void dns_free_resolution(struct dns_resolution *resolution)
|
||||
}
|
||||
|
||||
LIST_DEL(&resolution->list);
|
||||
pool_free2(dns_resolution_pool, resolution);
|
||||
pool_free(dns_resolution_pool, resolution);
|
||||
}
|
||||
|
||||
/* Links a requester (a server or a dns_srvrq) with a resolution. It returns 0
|
||||
@ -1826,8 +1826,8 @@ static void dns_deinit(void)
|
||||
free(srvrq);
|
||||
}
|
||||
|
||||
pool_destroy2(dns_answer_item_pool);
|
||||
pool_destroy2(dns_resolution_pool);
|
||||
pool_destroy(dns_answer_item_pool);
|
||||
pool_destroy(dns_resolution_pool);
|
||||
}
|
||||
|
||||
/* Finalizes the DNS configuration by allocating required resources and checking
|
||||
|
@ -31,7 +31,7 @@
|
||||
#include <proto/stream_interface.h>
|
||||
|
||||
/* Pool used to allocate filters */
|
||||
struct pool_head *pool2_filter = NULL;
|
||||
struct pool_head *pool_head_filter = NULL;
|
||||
|
||||
static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);
|
||||
|
||||
@ -384,7 +384,7 @@ flt_deinit_all_per_thread()
|
||||
static int
|
||||
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
|
||||
{
|
||||
struct filter *f = pool_alloc2(pool2_filter);
|
||||
struct filter *f = pool_alloc(pool_head_filter);
|
||||
|
||||
if (!f) /* not enough memory */
|
||||
return -1;
|
||||
@ -395,7 +395,7 @@ flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int fla
|
||||
if (FLT_OPS(f)->attach) {
|
||||
int ret = FLT_OPS(f)->attach(s, f);
|
||||
if (ret <= 0) {
|
||||
pool_free2(pool2_filter, f);
|
||||
pool_free(pool_head_filter, f);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -439,7 +439,7 @@ flt_stream_release(struct stream *s, int only_backend)
|
||||
if (FLT_OPS(filter)->detach)
|
||||
FLT_OPS(filter)->detach(s, filter);
|
||||
LIST_DEL(&filter->list);
|
||||
pool_free2(pool2_filter, filter);
|
||||
pool_free(pool_head_filter, filter);
|
||||
}
|
||||
}
|
||||
if (LIST_ISEMPTY(&strm_flt(s)->filters))
|
||||
@ -1184,7 +1184,7 @@ __attribute__((constructor))
|
||||
static void
|
||||
__filters_init(void)
|
||||
{
|
||||
pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
|
||||
pool_head_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
|
||||
cfg_register_keywords(&cfg_kws);
|
||||
hap_register_post_check(flt_init_all);
|
||||
hap_register_per_thread_init(flt_init_all_per_thread);
|
||||
@ -1195,7 +1195,7 @@ __attribute__((destructor))
|
||||
static void
|
||||
__filters_deinit(void)
|
||||
{
|
||||
pool_destroy2(pool2_filter);
|
||||
pool_destroy(pool_head_filter);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -34,7 +34,7 @@ struct flt_ops comp_ops;
|
||||
|
||||
|
||||
/* Pools used to allocate comp_state structs */
|
||||
static struct pool_head *pool2_comp_state = NULL;
|
||||
static struct pool_head *pool_head_comp_state = NULL;
|
||||
|
||||
static THREAD_LOCAL struct buffer *tmpbuf = &buf_empty;
|
||||
static THREAD_LOCAL struct buffer *zbuf = &buf_empty;
|
||||
@ -91,7 +91,7 @@ comp_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
|
||||
if (filter->ctx == NULL) {
|
||||
struct comp_state *st;
|
||||
|
||||
st = pool_alloc_dirty(pool2_comp_state);
|
||||
st = pool_alloc_dirty(pool_head_comp_state);
|
||||
if (st == NULL)
|
||||
return -1;
|
||||
|
||||
@ -124,7 +124,7 @@ comp_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
|
||||
/* release any possible compression context */
|
||||
if (st->comp_algo)
|
||||
st->comp_algo->end(&st->comp_ctx);
|
||||
pool_free2(pool2_comp_state, st);
|
||||
pool_free(pool_head_comp_state, st);
|
||||
filter->ctx = NULL;
|
||||
end:
|
||||
return 1;
|
||||
@ -999,5 +999,5 @@ __flt_http_comp_init(void)
|
||||
cfg_register_keywords(&cfg_kws);
|
||||
flt_register_keywords(&filter_kws);
|
||||
sample_register_fetches(&sample_fetch_keywords);
|
||||
pool2_comp_state = create_pool("comp_state", sizeof(struct comp_state), MEM_F_SHARED);
|
||||
pool_head_comp_state = create_pool("comp_state", sizeof(struct comp_state), MEM_F_SHARED);
|
||||
}
|
||||
|
@ -89,8 +89,8 @@ struct list curmphs;
|
||||
struct list curgphs;
|
||||
|
||||
/* Pools used to allocate SPOE structs */
|
||||
static struct pool_head *pool2_spoe_ctx = NULL;
|
||||
static struct pool_head *pool2_spoe_appctx = NULL;
|
||||
static struct pool_head *pool_head_spoe_ctx = NULL;
|
||||
static struct pool_head *pool_head_spoe_appctx = NULL;
|
||||
|
||||
struct flt_ops spoe_ops;
|
||||
|
||||
@ -1286,7 +1286,7 @@ spoe_release_appctx(struct appctx *appctx)
|
||||
/* Release allocated memory */
|
||||
spoe_release_buffer(&spoe_appctx->buffer,
|
||||
&spoe_appctx->buffer_wait);
|
||||
pool_free2(pool2_spoe_appctx, spoe_appctx);
|
||||
pool_free(pool_head_spoe_appctx, spoe_appctx);
|
||||
|
||||
if (!LIST_ISEMPTY(&agent->rt[tid].applets))
|
||||
goto end;
|
||||
@ -1943,10 +1943,10 @@ spoe_create_appctx(struct spoe_config *conf)
|
||||
if ((appctx = appctx_new(&spoe_applet, tid_bit)) == NULL)
|
||||
goto out_error;
|
||||
|
||||
appctx->ctx.spoe.ptr = pool_alloc_dirty(pool2_spoe_appctx);
|
||||
appctx->ctx.spoe.ptr = pool_alloc_dirty(pool_head_spoe_appctx);
|
||||
if (SPOE_APPCTX(appctx) == NULL)
|
||||
goto out_free_appctx;
|
||||
memset(appctx->ctx.spoe.ptr, 0, pool2_spoe_appctx->size);
|
||||
memset(appctx->ctx.spoe.ptr, 0, pool_head_spoe_appctx->size);
|
||||
|
||||
appctx->st0 = SPOE_APPCTX_ST_CONNECT;
|
||||
if ((SPOE_APPCTX(appctx)->task = task_new(tid_bit)) == NULL)
|
||||
@ -2000,7 +2000,7 @@ spoe_create_appctx(struct spoe_config *conf)
|
||||
out_free_spoe:
|
||||
task_free(SPOE_APPCTX(appctx)->task);
|
||||
out_free_spoe_appctx:
|
||||
pool_free2(pool2_spoe_appctx, SPOE_APPCTX(appctx));
|
||||
pool_free(pool_head_spoe_appctx, SPOE_APPCTX(appctx));
|
||||
out_free_appctx:
|
||||
appctx_free(appctx);
|
||||
out_error:
|
||||
@ -2745,7 +2745,7 @@ spoe_create_context(struct filter *filter)
|
||||
struct spoe_config *conf = FLT_CONF(filter);
|
||||
struct spoe_context *ctx;
|
||||
|
||||
ctx = pool_alloc_dirty(pool2_spoe_ctx);
|
||||
ctx = pool_alloc_dirty(pool_head_spoe_ctx);
|
||||
if (ctx == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
@ -2776,7 +2776,7 @@ spoe_destroy_context(struct spoe_context *ctx)
|
||||
return;
|
||||
|
||||
spoe_stop_processing(ctx);
|
||||
pool_free2(pool2_spoe_ctx, ctx);
|
||||
pool_free(pool_head_spoe_ctx, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -4321,14 +4321,14 @@ static void __spoe_init(void)
|
||||
http_req_keywords_register(&http_req_action_kws);
|
||||
http_res_keywords_register(&http_res_action_kws);
|
||||
|
||||
pool2_spoe_ctx = create_pool("spoe_ctx", sizeof(struct spoe_context), MEM_F_SHARED);
|
||||
pool2_spoe_appctx = create_pool("spoe_appctx", sizeof(struct spoe_appctx), MEM_F_SHARED);
|
||||
pool_head_spoe_ctx = create_pool("spoe_ctx", sizeof(struct spoe_context), MEM_F_SHARED);
|
||||
pool_head_spoe_appctx = create_pool("spoe_appctx", sizeof(struct spoe_appctx), MEM_F_SHARED);
|
||||
}
|
||||
|
||||
__attribute__((destructor))
|
||||
static void
|
||||
__spoe_deinit(void)
|
||||
{
|
||||
pool_destroy2(pool2_spoe_ctx);
|
||||
pool_destroy2(pool2_spoe_appctx);
|
||||
pool_destroy(pool_head_spoe_ctx);
|
||||
pool_destroy(pool_head_spoe_appctx);
|
||||
}
|
||||
|
@ -131,13 +131,13 @@ int frontend_accept(struct stream *s)
|
||||
s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */
|
||||
|
||||
if (unlikely(fe->nb_req_cap > 0)) {
|
||||
if ((s->req_cap = pool_alloc2(fe->req_cap_pool)) == NULL)
|
||||
if ((s->req_cap = pool_alloc(fe->req_cap_pool)) == NULL)
|
||||
goto out_return; /* no memory */
|
||||
memset(s->req_cap, 0, fe->nb_req_cap * sizeof(void *));
|
||||
}
|
||||
|
||||
if (unlikely(fe->nb_rsp_cap > 0)) {
|
||||
if ((s->res_cap = pool_alloc2(fe->rsp_cap_pool)) == NULL)
|
||||
if ((s->res_cap = pool_alloc(fe->rsp_cap_pool)) == NULL)
|
||||
goto out_free_reqcap; /* no memory */
|
||||
memset(s->res_cap, 0, fe->nb_rsp_cap * sizeof(void *));
|
||||
}
|
||||
@ -159,9 +159,9 @@ int frontend_accept(struct stream *s)
|
||||
|
||||
/* Error unrolling */
|
||||
out_free_rspcap:
|
||||
pool_free2(fe->rsp_cap_pool, s->res_cap);
|
||||
pool_free(fe->rsp_cap_pool, s->res_cap);
|
||||
out_free_reqcap:
|
||||
pool_free2(fe->req_cap_pool, s->req_cap);
|
||||
pool_free(fe->req_cap_pool, s->req_cap);
|
||||
out_return:
|
||||
return -1;
|
||||
}
|
||||
|
@ -796,7 +796,7 @@ static void sig_soft_stop(struct sig_handler *sh)
|
||||
{
|
||||
soft_stop();
|
||||
signal_unregister_handler(sh);
|
||||
pool_gc2(NULL);
|
||||
pool_gc(NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -805,7 +805,7 @@ static void sig_soft_stop(struct sig_handler *sh)
|
||||
static void sig_pause(struct sig_handler *sh)
|
||||
{
|
||||
pause_proxies();
|
||||
pool_gc2(NULL);
|
||||
pool_gc(NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -869,7 +869,7 @@ static void dump(struct sig_handler *sh)
|
||||
{
|
||||
/* dump memory usage then free everything possible */
|
||||
dump_pools();
|
||||
pool_gc2(NULL);
|
||||
pool_gc(NULL);
|
||||
}
|
||||
|
||||
/* This function check if cfg_cfgfiles containes directories.
|
||||
@ -2094,7 +2094,7 @@ void deinit(void)
|
||||
while (h) {
|
||||
h_next = h->next;
|
||||
free(h->name);
|
||||
pool_destroy2(h->pool);
|
||||
pool_destroy(h->pool);
|
||||
free(h);
|
||||
h = h_next;
|
||||
}/* end while(h) */
|
||||
@ -2103,7 +2103,7 @@ void deinit(void)
|
||||
while (h) {
|
||||
h_next = h->next;
|
||||
free(h->name);
|
||||
pool_destroy2(h->pool);
|
||||
pool_destroy(h->pool);
|
||||
free(h);
|
||||
h = h_next;
|
||||
}/* end while(h) */
|
||||
@ -2183,9 +2183,9 @@ void deinit(void)
|
||||
free_http_res_rules(&p->http_res_rules);
|
||||
task_free(p->task);
|
||||
|
||||
pool_destroy2(p->req_cap_pool);
|
||||
pool_destroy2(p->rsp_cap_pool);
|
||||
pool_destroy2(p->table.pool);
|
||||
pool_destroy(p->req_cap_pool);
|
||||
pool_destroy(p->rsp_cap_pool);
|
||||
pool_destroy(p->table.pool);
|
||||
|
||||
p0 = p;
|
||||
p = p->next;
|
||||
@ -2251,17 +2251,17 @@ void deinit(void)
|
||||
|
||||
deinit_buffer();
|
||||
|
||||
pool_destroy2(pool2_stream);
|
||||
pool_destroy2(pool2_session);
|
||||
pool_destroy2(pool2_connection);
|
||||
pool_destroy2(pool2_connstream);
|
||||
pool_destroy2(pool2_requri);
|
||||
pool_destroy2(pool2_task);
|
||||
pool_destroy2(pool2_capture);
|
||||
pool_destroy2(pool2_pendconn);
|
||||
pool_destroy2(pool2_sig_handlers);
|
||||
pool_destroy2(pool2_hdr_idx);
|
||||
pool_destroy2(pool2_http_txn);
|
||||
pool_destroy(pool_head_stream);
|
||||
pool_destroy(pool_head_session);
|
||||
pool_destroy(pool_head_connection);
|
||||
pool_destroy(pool_head_connstream);
|
||||
pool_destroy(pool_head_requri);
|
||||
pool_destroy(pool_head_task);
|
||||
pool_destroy(pool_head_capture);
|
||||
pool_destroy(pool_head_pendconn);
|
||||
pool_destroy(pool_head_sig_handlers);
|
||||
pool_destroy(pool_head_hdr_idx);
|
||||
pool_destroy(pool_head_http_txn);
|
||||
deinit_pollers();
|
||||
} /* end deinit() */
|
||||
|
||||
|
@@ -14,7 +14,7 @@
 #include <common/memory.h>
 #include <proto/hdr_idx.h>
 
-struct pool_head *pool2_hdr_idx = NULL;
+struct pool_head *pool_head_hdr_idx = NULL;
 
 /*
 * Add a header entry to <list> after element <after>. <after> is ignored when
src/hlua.c (22 changed lines)
@ -160,7 +160,7 @@ struct hlua gL;
|
||||
/* This is the memory pool containing struct lua for applets
|
||||
* (including cli).
|
||||
*/
|
||||
struct pool_head *pool2_hlua;
|
||||
struct pool_head *pool_head_hlua;
|
||||
|
||||
/* Used for Socket connection. */
|
||||
static struct proxy socket_proxy;
|
||||
@ -876,7 +876,7 @@ void hlua_ctx_destroy(struct hlua *lua)
|
||||
lua->T = NULL;
|
||||
|
||||
end:
|
||||
pool_free2(pool2_hlua, lua);
|
||||
pool_free(pool_head_hlua, lua);
|
||||
}
|
||||
|
||||
/* This function is used to restore the Lua context when a coroutine
|
||||
@ -2498,7 +2498,7 @@ __LJMP static int hlua_socket_new(lua_State *L)
|
||||
socket->tid = tid;
|
||||
|
||||
/* Check if the various memory pools are intialized. */
|
||||
if (!pool2_stream || !pool2_buffer) {
|
||||
if (!pool_head_stream || !pool_head_buffer) {
|
||||
hlua_pusherror(L, "socket: uninitialized pools.");
|
||||
goto out_fail_conf;
|
||||
}
|
||||
@ -5578,7 +5578,7 @@ static int hlua_register_task(lua_State *L)
|
||||
|
||||
ref = MAY_LJMP(hlua_checkfunction(L, 1));
|
||||
|
||||
hlua = pool_alloc2(pool2_hlua);
|
||||
hlua = pool_alloc(pool_head_hlua);
|
||||
if (!hlua)
|
||||
WILL_LJMP(luaL_error(L, "lua out of memory error."));
|
||||
|
||||
@ -5618,7 +5618,7 @@ static int hlua_sample_conv_wrapper(const struct arg *arg_p, struct sample *smp,
|
||||
* Lua initialization cause 5% performances loss.
|
||||
*/
|
||||
if (!stream->hlua) {
|
||||
stream->hlua = pool_alloc2(pool2_hlua);
|
||||
stream->hlua = pool_alloc(pool_head_hlua);
|
||||
if (!stream->hlua) {
|
||||
SEND_ERR(stream->be, "Lua converter '%s': can't initialize Lua context.\n", fcn->name);
|
||||
return 0;
|
||||
@ -5738,7 +5738,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
|
||||
* Lua initialization cause 5% performances loss.
|
||||
*/
|
||||
if (!stream->hlua) {
|
||||
stream->hlua = pool_alloc2(pool2_hlua);
|
||||
stream->hlua = pool_alloc(pool_head_hlua);
|
||||
if (!stream->hlua) {
|
||||
SEND_ERR(stream->be, "Lua sample-fetch '%s': can't initialize Lua context.\n", fcn->name);
|
||||
return 0;
|
||||
@ -5996,7 +5996,7 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
|
||||
* Lua initialization cause 5% performances loss.
|
||||
*/
|
||||
if (!s->hlua) {
|
||||
s->hlua = pool_alloc2(pool2_hlua);
|
||||
s->hlua = pool_alloc(pool_head_hlua);
|
||||
if (!s->hlua) {
|
||||
SEND_ERR(px, "Lua action '%s': can't initialize Lua context.\n",
|
||||
rule->arg.hlua_rule->fcn.name);
|
||||
@ -6152,7 +6152,7 @@ static int hlua_applet_tcp_init(struct appctx *ctx, struct proxy *px, struct str
|
||||
char **arg;
|
||||
const char *error;
|
||||
|
||||
hlua = pool_alloc2(pool2_hlua);
|
||||
hlua = pool_alloc(pool_head_hlua);
|
||||
if (!hlua) {
|
||||
SEND_ERR(px, "Lua applet tcp '%s': out of memory.\n",
|
||||
ctx->rule->arg.hlua_rule->fcn.name);
|
||||
@ -6349,7 +6349,7 @@ static int hlua_applet_http_init(struct appctx *ctx, struct proxy *px, struct st
|
||||
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL)
|
||||
txn->flags = (txn->flags & ~TX_CON_WANT_MSK) | TX_CON_WANT_SCL;
|
||||
|
||||
hlua = pool_alloc2(pool2_hlua);
|
||||
hlua = pool_alloc(pool_head_hlua);
|
||||
if (!hlua) {
|
||||
SEND_ERR(px, "Lua applet http '%s': out of memory.\n",
|
||||
ctx->rule->arg.hlua_rule->fcn.name);
|
||||
@ -6897,7 +6897,7 @@ static int hlua_cli_parse_fct(char **args, struct appctx *appctx, void *private)
|
||||
fcn = private;
|
||||
appctx->ctx.hlua_cli.fcn = private;
|
||||
|
||||
hlua = pool_alloc2(pool2_hlua);
|
||||
hlua = pool_alloc(pool_head_hlua);
|
||||
if (!hlua) {
|
||||
SEND_ERR(NULL, "Lua cli '%s': out of memory.\n", fcn->name);
|
||||
return 1;
|
||||
@ -7371,7 +7371,7 @@ void hlua_init(void)
|
||||
HA_SPIN_INIT(&hlua_global_lock);
|
||||
|
||||
/* Initialise struct hlua and com signals pool */
|
||||
pool2_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED);
|
||||
pool_head_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED);
|
||||
|
||||
/* Register configuration keywords. */
|
||||
cfg_register_keywords(&cfg_kws);
|
||||
|
@@ -2378,7 +2378,7 @@ void strm_log(struct stream *s)
 
 /* if unique-id was not generated */
 if (!s->unique_id && !LIST_ISEMPTY(&sess->fe->format_unique_id)) {
-if ((s->unique_id = pool_alloc2(pool2_uniqueid)) != NULL)
+if ((s->unique_id = pool_alloc(pool_head_uniqueid)) != NULL)
 build_logline(s, s->unique_id, UNIQUEID_LEN, &sess->fe->format_unique_id);
 }
 
12
src/memory.c
12
src/memory.c
@ -123,7 +123,7 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
|
||||
if (failed)
|
||||
return NULL;
|
||||
failed++;
|
||||
pool_gc2(pool);
|
||||
pool_gc(pool);
|
||||
continue;
|
||||
}
|
||||
if (++pool->allocated > avail)
|
||||
@ -151,7 +151,7 @@ void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
|
||||
/*
|
||||
* This function frees whatever can be freed in pool <pool>.
|
||||
*/
|
||||
void pool_flush2(struct pool_head *pool)
|
||||
void pool_flush(struct pool_head *pool)
|
||||
{
|
||||
void *temp, *next;
|
||||
if (!pool)
|
||||
@ -175,11 +175,11 @@ void pool_flush2(struct pool_head *pool)
|
||||
* the minimum thresholds imposed by owners. It takes care of avoiding
|
||||
* recursion because it may be called from a signal handler.
|
||||
*
|
||||
* <pool_ctx> is used when pool_gc2 is called to release resources to allocate
|
||||
* <pool_ctx> is used when pool_gc is called to release resources to allocate
|
||||
* an element in __pool_refill_alloc. It is important because <pool_ctx> is
|
||||
* already locked, so we need to skip the lock here.
|
||||
*/
|
||||
void pool_gc2(struct pool_head *pool_ctx)
|
||||
void pool_gc(struct pool_head *pool_ctx)
|
||||
{
|
||||
static int recurse;
|
||||
int cur_recurse = 0;
|
||||
@ -216,10 +216,10 @@ void pool_gc2(struct pool_head *pool_ctx)
|
||||
* pointer, otherwise it returns the pool.
|
||||
* .
|
||||
*/
|
||||
void *pool_destroy2(struct pool_head *pool)
|
||||
void *pool_destroy(struct pool_head *pool)
|
||||
{
|
||||
if (pool) {
|
||||
pool_flush2(pool);
|
||||
pool_flush(pool);
|
||||
if (pool->used)
|
||||
return pool;
|
||||
pool->users--;
|
||||
|
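Taken together, the renamed entry points in src/memory.c cover the whole life cycle of a pool. Below is a hedged sketch of how a caller uses them after this commit; the `struct example` type, the "example" pool name and the include path are illustrative assumptions, while the function names and signatures are the ones visible in the hunks above:

#include <common/memory.h>   /* assumed location of the pool API: create_pool(), pool_alloc(), ... */

struct example { int value; };               /* hypothetical object type */
static struct pool_head *pool_head_example;  /* renamed-style pool head pointer */

static int init_example()
{
	/* shared pool of fixed-size objects; report 0 on allocation failure, 1 if OK */
	pool_head_example = create_pool("example", sizeof(struct example), MEM_F_SHARED);
	return pool_head_example != NULL;
}

static struct example *get_example()
{
	struct example *e = pool_alloc(pool_head_example);

	if (!e)
		pool_gc(NULL);   /* trim unused entries from all pools; the caller may then retry */
	return e;
}

static void put_example(struct example *e)
{
	pool_free(pool_head_example, e);     /* return the object to its pool */
}

static void deinit_example()
{
	pool_destroy(pool_head_example);     /* flushes the pool; returns it if entries are still in use */
}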
src/mux_h2.c (30 changed lines)

@ -30,9 +30,9 @@ static const struct h2s *h2_closed_stream;
static const struct h2s *h2_idle_stream;

/* the h2c connection pool */
static struct pool_head *pool2_h2c;
static struct pool_head *pool_head_h2c;
/* the h2s stream pool */
static struct pool_head *pool2_h2s;
static struct pool_head *pool_head_h2s;

/* Connection flags (32 bit), in h2c->flags */
#define H2_CF_NONE 0x00000000
@ -328,7 +328,7 @@ static int h2c_frt_init(struct connection *conn)
struct task *t = NULL;
struct session *sess = conn->owner;

h2c = pool_alloc2(pool2_h2c);
h2c = pool_alloc(pool_head_h2c);
if (!h2c)
goto fail;

@ -387,7 +387,7 @@ static int h2c_frt_init(struct connection *conn)
fail:
if (t)
task_free(t);
pool_free2(pool2_h2c, h2c);
pool_free(pool_head_h2c, h2c);
return -1;
}

@ -448,7 +448,7 @@ static void h2_release(struct connection *conn)
h2c->task = NULL;
}

pool_free2(pool2_h2c, h2c);
pool_free(pool_head_h2c, h2c);
}

conn->mux = NULL;
@ -597,7 +597,7 @@ static struct h2s *h2c_stream_new(struct h2c *h2c, int id)
struct conn_stream *cs;
struct h2s *h2s;

h2s = pool_alloc2(pool2_h2s);
h2s = pool_alloc(pool_head_h2s);
if (!h2s)
goto out;

@ -631,7 +631,7 @@ static struct h2s *h2c_stream_new(struct h2c *h2c, int id)
cs_free(cs);
out_close:
eb32_delete(&h2s->by_id);
pool_free2(pool2_h2s, h2s);
pool_free(pool_head_h2s, h2s);
h2s = NULL;
out:
return h2s;
@ -992,7 +992,7 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags)
if (!h2s->cs) {
/* this stream was already orphaned */
eb32_delete(&h2s->by_id);
pool_free2(pool2_h2s, h2s);
pool_free(pool_head_h2s, h2s);
continue;
}

@ -1905,7 +1905,7 @@ static int h2_process_mux(struct h2c *h2c)
else {
/* just sent the last frame for this orphaned stream */
eb32_delete(&h2s->by_id);
pool_free2(pool2_h2s, h2s);
pool_free(pool_head_h2s, h2s);
}
}
}
@ -1947,7 +1947,7 @@ static int h2_process_mux(struct h2c *h2c)
else {
/* just sent the last frame for this orphaned stream */
eb32_delete(&h2s->by_id);
pool_free2(pool2_h2s, h2s);
pool_free(pool_head_h2s, h2s);
}
}
}
@ -2322,7 +2322,7 @@ static void h2_detach(struct conn_stream *cs)
h2c->task->expire = TICK_ETERNITY;
}
}
pool_free2(pool2_h2s, h2s);
pool_free(pool_head_h2s, h2s);
}

static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
@ -3202,8 +3202,8 @@ static struct cfg_kw_list cfg_kws = {ILH, {

static void __h2_deinit(void)
{
pool_destroy2(pool2_h2s);
pool_destroy2(pool2_h2c);
pool_destroy(pool_head_h2s);
pool_destroy(pool_head_h2c);
}

__attribute__((constructor))
@ -3212,6 +3212,6 @@ static void __h2_init(void)
alpn_register_mux(&alpn_mux_h2);
cfg_register_keywords(&cfg_kws);
hap_register_post_deinit(__h2_deinit);
pool2_h2c = create_pool("h2c", sizeof(struct h2c), MEM_F_SHARED);
pool2_h2s = create_pool("h2s", sizeof(struct h2s), MEM_F_SHARED);
pool_head_h2c = create_pool("h2c", sizeof(struct h2c), MEM_F_SHARED);
pool_head_h2s = create_pool("h2s", sizeof(struct h2s), MEM_F_SHARED);
}
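The tail of the mux_h2 hunks also shows the registration pattern used by modules that own their pools: create them from a constructor at start-up and destroy them through a post-deinit hook. A minimal sketch following the same pattern (the "demo" names are invented; create_pool(), pool_destroy() and hap_register_post_deinit() are the calls visible in __h2_init()/__h2_deinit() above):

struct demo { int dummy; };                  /* hypothetical per-object structure */
static struct pool_head *pool_head_demo;

static void __demo_deinit(void)
{
	pool_destroy(pool_head_demo);        /* runs once at deinit through the registered hook */
}

__attribute__((constructor))
static void __demo_init(void)
{
	hap_register_post_deinit(__demo_deinit);
	pool_head_demo = create_pool("demo", sizeof(struct demo), MEM_F_SHARED);
}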
@ -1941,7 +1941,7 @@ static struct appctx *peer_session_create(struct peers *peers, struct peer *peer
conn_free(conn);
out_free_strm:
LIST_DEL(&s->list);
pool_free2(pool2_stream, s);
pool_free(pool_head_stream, s);
out_free_sess:
session_free(sess);
out_free_appctx:
src/pipe.c (10 changed lines)

@ -20,7 +20,7 @@
#include <types/global.h>
#include <types/pipe.h>

struct pool_head *pool2_pipe = NULL;
struct pool_head *pool_head_pipe = NULL;
struct pipe *pipes_live = NULL; /* pipes which are still ready to use */

__decl_hathreads(HA_SPINLOCK_T pipes_lock); /* lock used to protect pipes list */
@ -31,7 +31,7 @@ int pipes_free = 0; /* # of pipes unused */
/* allocate memory for the pipes */
static void init_pipe()
{
pool2_pipe = create_pool("pipe", sizeof(struct pipe), MEM_F_SHARED);
pool_head_pipe = create_pool("pipe", sizeof(struct pipe), MEM_F_SHARED);
pipes_used = 0;
pipes_free = 0;
HA_SPIN_INIT(&pipes_lock);
@ -57,12 +57,12 @@ struct pipe *get_pipe()
if (pipes_used >= global.maxpipes)
goto out;

ret = pool_alloc2(pool2_pipe);
ret = pool_alloc(pool_head_pipe);
if (!ret)
goto out;

if (pipe(pipefd) < 0) {
pool_free2(pool2_pipe, ret);
pool_free(pool_head_pipe, ret);
goto out;
}
#ifdef F_SETPIPE_SZ
@ -83,7 +83,7 @@ static void inline __kill_pipe(struct pipe *p)
{
close(p->prod);
close(p->cons);
pool_free2(pool2_pipe, p);
pool_free(pool_head_pipe, p);
pipes_used--;
return;
}
@ -470,8 +470,8 @@ void init_proto_http()
FD_SET(0x7f, http_encode_map);

/* memory allocations */
pool2_http_txn = create_pool("http_txn", sizeof(struct http_txn), MEM_F_SHARED);
pool2_uniqueid = create_pool("uniqueid", UNIQUEID_LEN, MEM_F_SHARED);
pool_head_http_txn = create_pool("http_txn", sizeof(struct http_txn), MEM_F_SHARED);
pool_head_uniqueid = create_pool("uniqueid", UNIQUEID_LEN, MEM_F_SHARED);
}

/*
@ -1188,10 +1188,10 @@ void http_return_srv_error(struct stream *s, struct stream_interface *si)
extern const char sess_term_cond[8];
extern const char sess_fin_state[8];
extern const char *monthname[12];
struct pool_head *pool2_http_txn;
struct pool_head *pool2_requri;
struct pool_head *pool2_capture = NULL;
struct pool_head *pool2_uniqueid;
struct pool_head *pool_head_http_txn;
struct pool_head *pool_head_requri;
struct pool_head *pool_head_capture = NULL;
struct pool_head *pool_head_uniqueid;

/*
* Capture headers from message starting at <som> according to header list
@ -1224,7 +1224,7 @@ void capture_headers(char *som, struct hdr_idx *idx,
(strncasecmp(sol, h->name, h->namelen) == 0)) {
if (cap[h->index] == NULL)
cap[h->index] =
pool_alloc2(h->pool);
pool_alloc(h->pool);

if (cap[h->index] == NULL) {
ha_alert("HTTP capture : out of memory.\n");
@ -1958,7 +1958,7 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
*/
if (unlikely(s->logs.logwait & LW_REQ)) {
/* we have a complete HTTP request that we must log */
if ((txn->uri = pool_alloc2(pool2_requri)) != NULL) {
if ((txn->uri = pool_alloc(pool_head_requri)) != NULL) {
int urilen = msg->sl.rq.l;

if (urilen >= global.tune.requri_len )
@ -3731,7 +3731,7 @@ int http_process_request(struct stream *s, struct channel *req, int an_bit)
/* add unique-id if "header-unique-id" is specified */

if (!LIST_ISEMPTY(&sess->fe->format_unique_id) && !s->unique_id) {
if ((s->unique_id = pool_alloc2(pool2_uniqueid)) == NULL)
if ((s->unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
goto return_bad_req;
s->unique_id[0] = '\0';
build_logline(s, s->unique_id, UNIQUEID_LEN, &sess->fe->format_unique_id);
@ -6890,7 +6890,7 @@ void manage_client_side_cookies(struct stream *s, struct channel *req)
memcmp(att_beg, sess->fe->capture_name, sess->fe->capture_namelen) == 0) {
int log_len = val_end - att_beg;

if ((txn->cli_cookie = pool_alloc2(pool2_capture)) == NULL) {
if ((txn->cli_cookie = pool_alloc(pool_head_capture)) == NULL) {
ha_alert("HTTP logging : out of memory.\n");
} else {
if (log_len > sess->fe->capture_len)
@ -7542,7 +7542,7 @@ void manage_server_side_cookies(struct stream *s, struct channel *res)
(val_end - att_beg >= sess->fe->capture_namelen) &&
memcmp(att_beg, sess->fe->capture_name, sess->fe->capture_namelen) == 0) {
int log_len = val_end - att_beg;
if ((txn->srv_cookie = pool_alloc2(pool2_capture)) == NULL) {
if ((txn->srv_cookie = pool_alloc(pool_head_capture)) == NULL) {
ha_alert("HTTP logging : out of memory.\n");
}
else {
@ -7988,14 +7988,14 @@ struct http_txn *http_alloc_txn(struct stream *s)
if (txn)
return txn;

txn = pool_alloc2(pool2_http_txn);
txn = pool_alloc(pool_head_http_txn);
if (!txn)
return txn;

txn->hdr_idx.size = global.tune.max_http_hdr;
txn->hdr_idx.v = pool_alloc2(pool2_hdr_idx);
txn->hdr_idx.v = pool_alloc(pool_head_hdr_idx);
if (!txn->hdr_idx.v) {
pool_free2(pool2_http_txn, txn);
pool_free(pool_head_http_txn, txn);
return NULL;
}

@ -8069,10 +8069,10 @@ void http_end_txn(struct stream *s)
struct proxy *fe = strm_fe(s);

/* these ones will have been dynamically allocated */
pool_free2(pool2_requri, txn->uri);
pool_free2(pool2_capture, txn->cli_cookie);
pool_free2(pool2_capture, txn->srv_cookie);
pool_free2(pool2_uniqueid, s->unique_id);
pool_free(pool_head_requri, txn->uri);
pool_free(pool_head_capture, txn->cli_cookie);
pool_free(pool_head_capture, txn->srv_cookie);
pool_free(pool_head_uniqueid, s->unique_id);

s->unique_id = NULL;
txn->uri = NULL;
@ -8082,14 +8082,14 @@ void http_end_txn(struct stream *s)
if (s->req_cap) {
struct cap_hdr *h;
for (h = fe->req_cap; h; h = h->next)
pool_free2(h->pool, s->req_cap[h->index]);
pool_free(h->pool, s->req_cap[h->index]);
memset(s->req_cap, 0, fe->nb_req_cap * sizeof(void *));
}

if (s->res_cap) {
struct cap_hdr *h;
for (h = fe->rsp_cap; h; h = h->next)
pool_free2(h->pool, s->res_cap[h->index]);
pool_free(h->pool, s->res_cap[h->index]);
memset(s->res_cap, 0, fe->nb_rsp_cap * sizeof(void *));
}

@ -9550,7 +9550,7 @@ smp_fetch_uniqueid(const struct arg *args, struct sample *smp, const char *kw, v
return 0;

if (!smp->strm->unique_id) {
if ((smp->strm->unique_id = pool_alloc2(pool2_uniqueid)) == NULL)
if ((smp->strm->unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
return 0;
smp->strm->unique_id[0] = '\0';
}
@ -11592,7 +11592,7 @@ static int smp_conv_req_capture(const struct arg *args, struct sample *smp, void

/* check for the memory allocation */
if (smp->strm->req_cap[hdr->index] == NULL)
smp->strm->req_cap[hdr->index] = pool_alloc2(hdr->pool);
smp->strm->req_cap[hdr->index] = pool_alloc(hdr->pool);
if (smp->strm->req_cap[hdr->index] == NULL)
return 0;

@ -11633,7 +11633,7 @@ static int smp_conv_res_capture(const struct arg *args, struct sample *smp, void

/* check for the memory allocation */
if (smp->strm->res_cap[hdr->index] == NULL)
smp->strm->res_cap[hdr->index] = pool_alloc2(hdr->pool);
smp->strm->res_cap[hdr->index] = pool_alloc(hdr->pool);
if (smp->strm->res_cap[hdr->index] == NULL)
return 0;

@ -11981,7 +11981,7 @@ enum act_return http_action_req_capture(struct act_rule *rule, struct proxy *px,
return ACT_RET_CONT;

if (cap[h->index] == NULL)
cap[h->index] = pool_alloc2(h->pool);
cap[h->index] = pool_alloc(h->pool);

if (cap[h->index] == NULL) /* no more capture memory */
return ACT_RET_CONT;
@ -12022,7 +12022,7 @@ enum act_return http_action_req_capture_by_id(struct act_rule *rule, struct prox
return ACT_RET_CONT;

if (cap[h->index] == NULL)
cap[h->index] = pool_alloc2(h->pool);
cap[h->index] = pool_alloc(h->pool);

if (cap[h->index] == NULL) /* no more capture memory */
return ACT_RET_CONT;
@ -12208,7 +12208,7 @@ enum act_return http_action_res_capture_by_id(struct act_rule *rule, struct prox
return ACT_RET_CONT;

if (cap[h->index] == NULL)
cap[h->index] = pool_alloc2(h->pool);
cap[h->index] = pool_alloc(h->pool);

if (cap[h->index] == NULL) /* no more capture memory */
return ACT_RET_CONT;
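One detail worth noting in the HTTP hunks above: not every pool is reached through a renamed pool_head_* pointer. The capture pools hang off the proxy and header descriptors (fe->req_cap_pool, fe->rsp_cap_pool, h->pool), so only the function names change at those call sites. A reduced sketch of the recurring per-slot capture allocation as it looks after the rename (the surrounding variables are the ones from the hunks above, and h->pool is assumed to have been created with create_pool() elsewhere):

	if (cap[h->index] == NULL)
		cap[h->index] = pool_alloc(h->pool);   /* one pool per capture slot */

	if (cap[h->index] == NULL)                     /* no more capture memory */
		return ACT_RET_CONT;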
@ -855,7 +855,7 @@ struct task *manage_proxy(struct task *t)
p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
stop_proxy(p);
/* try to free more memory */
pool_gc2(NULL);
pool_gc(NULL);
}
else {
next = tick_first(next, p->stop_time);
@ -872,7 +872,7 @@ struct task *manage_proxy(struct task *t)
if (unlikely(stopping && p->state == PR_STSTOPPED && p->table.current)) {
if (!p->table.syncing) {
stktable_trash_oldest(&p->table, p->table.current);
pool_gc2(NULL);
pool_gc(NULL);
}
if (p->table.current) {
/* some entries still remain, let's recheck in one second */
src/queue.c (12 changed lines)

@ -22,15 +22,15 @@
#include <proto/task.h>


struct pool_head *pool2_pendconn;
struct pool_head *pool_head_pendconn;

static void __pendconn_free(struct pendconn *p);

/* perform minimal intializations, report 0 in case of error, 1 if OK. */
int init_pendconn()
{
pool2_pendconn = create_pool("pendconn", sizeof(struct pendconn), MEM_F_SHARED);
return pool2_pendconn != NULL;
pool_head_pendconn = create_pool("pendconn", sizeof(struct pendconn), MEM_F_SHARED);
return pool_head_pendconn != NULL;
}

/* returns the effective dynamic maxconn for a server, considering the minconn
@ -172,7 +172,7 @@ struct pendconn *pendconn_add(struct stream *strm)
struct server *srv;
int count;

p = pool_alloc2(pool2_pendconn);
p = pool_alloc(pool_head_pendconn);
if (!p)
return NULL;

@ -281,7 +281,7 @@ void pendconn_free(struct pendconn *p)
}
p->strm->pend_pos = NULL;
HA_ATOMIC_SUB(&p->strm->be->totpend, 1);
pool_free2(pool2_pendconn, p);
pool_free(pool_head_pendconn, p);
}

/* Lock-free version of pendconn_free. */
@ -297,7 +297,7 @@ static void __pendconn_free(struct pendconn *p)
}
p->strm->pend_pos = NULL;
HA_ATOMIC_SUB(&p->strm->be->totpend, 1);
pool_free2(pool2_pendconn, p);
pool_free(pool_head_pendconn, p);
}

/*
@ -28,7 +28,7 @@
#include <proto/tcp_rules.h>
#include <proto/vars.h>

struct pool_head *pool2_session;
struct pool_head *pool_head_session;

static int conn_complete_session(struct connection *conn);
static struct task *session_expire_embryonic(struct task *t);
@ -42,7 +42,7 @@ struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type
{
struct session *sess;

sess = pool_alloc2(pool2_session);
sess = pool_alloc(pool_head_session);
if (sess) {
sess->listener = li;
sess->fe = fe;
@ -69,7 +69,7 @@ void session_free(struct session *sess)
listener_release(sess->listener);
session_store_counters(sess);
vars_prune_per_sess(&sess->vars);
pool_free2(pool2_session, sess);
pool_free(pool_head_session, sess);
HA_ATOMIC_SUB(&jobs, 1);
}

@ -84,8 +84,8 @@ void conn_session_free(struct connection *conn)
/* perform minimal intializations, report 0 in case of error, 1 if OK. */
int init_session()
{
pool2_session = create_pool("session", sizeof(struct session), MEM_F_SHARED);
return pool2_session != NULL;
pool_head_session = create_pool("session", sizeof(struct session), MEM_F_SHARED);
return pool_head_session != NULL;
}

/* count a new session to keep frontend, listener and track stats up to date */
src/signal.c (16 changed lines)

@ -27,7 +27,7 @@
int signal_queue_len; /* length of signal queue, <= MAX_SIGNAL (1 entry per signal max) */
int signal_queue[MAX_SIGNAL]; /* in-order queue of received signals */
struct signal_descriptor signal_state[MAX_SIGNAL];
struct pool_head *pool2_sig_handlers = NULL;
struct pool_head *pool_head_sig_handlers = NULL;
sigset_t blocked_sig;
int signal_pending = 0; /* non-zero if t least one signal remains unprocessed */

@ -130,8 +130,8 @@ int signal_init()
for (sig = 0; sig < MAX_SIGNAL; sig++)
LIST_INIT(&signal_state[sig].handlers);

pool2_sig_handlers = create_pool("sig_handlers", sizeof(struct sig_handler), MEM_F_SHARED);
return pool2_sig_handlers != NULL;
pool_head_sig_handlers = create_pool("sig_handlers", sizeof(struct sig_handler), MEM_F_SHARED);
return pool_head_sig_handlers != NULL;
}

/* releases all registered signal handlers */
@ -145,7 +145,7 @@ void deinit_signals()
signal(sig, SIG_DFL);
list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
LIST_DEL(&sh->list);
pool_free2(pool2_sig_handlers, sh);
pool_free(pool_head_sig_handlers, sh);
}
}
HA_SPIN_DESTROY(&signals_lock);
@ -172,7 +172,7 @@ struct sig_handler *signal_register_fct(int sig, void (*fct)(struct sig_handler
if (!fct)
return NULL;

sh = pool_alloc2(pool2_sig_handlers);
sh = pool_alloc(pool_head_sig_handlers);
if (!sh)
return NULL;

@ -204,7 +204,7 @@ struct sig_handler *signal_register_task(int sig, struct task *task, int reason)
if (!task)
return NULL;

sh = pool_alloc2(pool2_sig_handlers);
sh = pool_alloc(pool_head_sig_handlers);
if (!sh)
return NULL;

@ -221,7 +221,7 @@ struct sig_handler *signal_register_task(int sig, struct task *task, int reason)
void signal_unregister_handler(struct sig_handler *handler)
{
LIST_DEL(&handler->list);
pool_free2(pool2_sig_handlers, handler);
pool_free(pool_head_sig_handlers, handler);
}

/* Immediately unregister a handler so that no further signals may be delivered
@ -243,7 +243,7 @@ void signal_unregister_target(int sig, void *target)
list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
if (sh->handler == target) {
LIST_DEL(&sh->list);
pool_free2(pool2_sig_handlers, sh);
pool_free(pool_head_sig_handlers, sh);
break;
}
}
@ -258,7 +258,7 @@ struct ssl_capture {
unsigned char ciphersuite_len;
char ciphersuite[0];
};
struct pool_head *pool2_ssl_capture = NULL;
struct pool_head *pool_head_ssl_capture = NULL;
static int ssl_capture_ptr_index = -1;

#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
@ -1536,7 +1536,7 @@ void ssl_sock_parse_clienthello(int write_p, int version, int content_type,
if (msg + rec_len > end || msg + rec_len < msg)
return;

capture = pool_alloc_dirty(pool2_ssl_capture);
capture = pool_alloc_dirty(pool_head_ssl_capture);
if (!capture)
return;
/* Compute the xxh64 of the ciphersuite. */
@ -4906,7 +4906,7 @@ static int ssl_sock_init(struct connection *conn)
conn->xprt_ctx = SSL_new(objt_server(conn->target)->ssl_ctx.ctx);
if (!conn->xprt_ctx) {
if (may_retry--) {
pool_gc2(NULL);
pool_gc(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4918,7 +4918,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2(NULL);
pool_gc(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4930,7 +4930,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2(NULL);
pool_gc(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4965,7 +4965,7 @@ static int ssl_sock_init(struct connection *conn)
conn->xprt_ctx = SSL_new(objt_listener(conn->target)->bind_conf->initial_ctx);
if (!conn->xprt_ctx) {
if (may_retry--) {
pool_gc2(NULL);
pool_gc(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4977,7 +4977,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2(NULL);
pool_gc(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4989,7 +4989,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2(NULL);
pool_gc(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -8113,13 +8113,13 @@ static int ssl_parse_global_capture_cipherlist(char **args, int section_type, st
if (ret != 0)
return ret;

if (pool2_ssl_capture) {
if (pool_head_ssl_capture) {
memprintf(err, "'%s' is already configured.", args[0]);
return -1;
}

pool2_ssl_capture = create_pool("ssl-capture", sizeof(struct ssl_capture) + global_ssl.capture_cipherlist, MEM_F_SHARED);
if (!pool2_ssl_capture) {
pool_head_ssl_capture = create_pool("ssl-capture", sizeof(struct ssl_capture) + global_ssl.capture_cipherlist, MEM_F_SHARED);
if (!pool_head_ssl_capture) {
memprintf(err, "Out of memory error.");
return -1;
}
@ -8742,7 +8742,7 @@ static void ssl_sock_sctl_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad,
#endif
static void ssl_sock_capture_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
pool_free2(pool2_ssl_capture, ptr);
pool_free(pool_head_ssl_capture, ptr);
}

__attribute__((constructor))
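The ssl_sock hunks repeat a single idiom several times: when an allocation fails, trim every shared pool with pool_gc(NULL) and retry a bounded number of times before giving up with CO_ER_SSL_NO_MEM. A reduced, hedged sketch of that idiom (this helper does not exist in the tree, it only condenses the retry logic shown above):

#include <openssl/ssl.h>

/* try the allocation once more after trimming the pools, as ssl_sock_init() does */
static SSL *alloc_ssl_with_retry(SSL_CTX *ctx)
{
	SSL *ssl;
	int may_retry = 1;

 retry:
	ssl = SSL_new(ctx);
	if (!ssl && may_retry--) {
		pool_gc(NULL);   /* release unused objects from all shared pools */
		goto retry;
	}
	return ssl;              /* NULL here means a genuine out-of-memory condition */
}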
@ -51,7 +51,7 @@ static THREAD_LOCAL struct stktable_key static_table_key;
void __stksess_free(struct stktable *t, struct stksess *ts)
{
t->current--;
pool_free2(t->pool, (void *)ts - t->data_size);
pool_free(t->pool, (void *)ts - t->data_size);
}

/*
@ -226,7 +226,7 @@ struct stksess *__stksess_new(struct stktable *t, struct stktable_key *key)
return NULL;
}

ts = pool_alloc2(t->pool);
ts = pool_alloc(t->pool);
if (ts) {
t->current++;
ts = (void *)ts + t->data_size;
src/stream.c (42 changed lines)

@ -60,7 +60,7 @@
#include <proto/tcp_rules.h>
#include <proto/vars.h>

struct pool_head *pool2_stream;
struct pool_head *pool_head_stream;
struct list streams;
__decl_hathreads(HA_SPINLOCK_T streams_lock);

@ -100,7 +100,7 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin)
struct conn_stream *cs = objt_cs(origin);
struct appctx *appctx = objt_appctx(origin);

if (unlikely((s = pool_alloc2(pool2_stream)) == NULL))
if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
goto out_fail_alloc;

/* minimum stream initialization required for an embryonic stream is
@ -284,7 +284,7 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin)
task_free(t);
out_fail_alloc:
LIST_DEL(&s->list);
pool_free2(pool2_stream, s);
pool_free(pool_head_stream, s);
return NULL;
}

@ -355,8 +355,8 @@ static void stream_free(struct stream *s)
}

if (s->txn) {
pool_free2(pool2_hdr_idx, s->txn->hdr_idx.v);
pool_free2(pool2_http_txn, s->txn);
pool_free(pool_head_hdr_idx, s->txn->hdr_idx.v);
pool_free(pool_head_http_txn, s->txn);
s->txn = NULL;
}

@ -364,8 +364,8 @@ static void stream_free(struct stream *s)
flt_stream_release(s, 0);

if (fe) {
pool_free2(fe->rsp_cap_pool, s->res_cap);
pool_free2(fe->req_cap_pool, s->req_cap);
pool_free(fe->rsp_cap_pool, s->res_cap);
pool_free(fe->req_cap_pool, s->req_cap);
}

/* Cleanup all variable contexts. */
@ -394,21 +394,21 @@ static void stream_free(struct stream *s)
/* FIXME: for now we have a 1:1 relation between stream and session so
* the stream must free the session.
*/
pool_free2(pool2_stream, s);
pool_free(pool_head_stream, s);

/* We may want to free the maximum amount of pools if the proxy is stopping */
if (fe && unlikely(fe->state == PR_STSTOPPED)) {
pool_flush2(pool2_buffer);
pool_flush2(pool2_http_txn);
pool_flush2(pool2_hdr_idx);
pool_flush2(pool2_requri);
pool_flush2(pool2_capture);
pool_flush2(pool2_stream);
pool_flush2(pool2_session);
pool_flush2(pool2_connection);
pool_flush2(pool2_pendconn);
pool_flush2(fe->req_cap_pool);
pool_flush2(fe->rsp_cap_pool);
pool_flush(pool_head_buffer);
pool_flush(pool_head_http_txn);
pool_flush(pool_head_hdr_idx);
pool_flush(pool_head_requri);
pool_flush(pool_head_capture);
pool_flush(pool_head_stream);
pool_flush(pool_head_session);
pool_flush(pool_head_connection);
pool_flush(pool_head_pendconn);
pool_flush(fe->req_cap_pool);
pool_flush(fe->rsp_cap_pool);
}
}

@ -470,8 +470,8 @@ int init_stream()
{
LIST_INIT(&streams);
HA_SPIN_INIT(&streams_lock);
pool2_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED);
return pool2_stream != NULL;
pool_head_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED);
return pool_head_stream != NULL;
}

void stream_process_counters(struct stream *s)
src/task.c (12 changed lines)

@ -24,12 +24,12 @@
#include <proto/stream.h>
#include <proto/task.h>

struct pool_head *pool2_task;
struct pool_head *pool_head_task;

/* This is the memory pool containing all the signal structs. These
* struct are used to store each requiered signal between two tasks.
*/
struct pool_head *pool2_notification;
struct pool_head *pool_head_notification;

unsigned int nb_tasks = 0;
unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
@ -342,11 +342,11 @@ int init_task()
memset(&rqueue, 0, sizeof(rqueue));
HA_SPIN_INIT(&wq_lock);
HA_SPIN_INIT(&rq_lock);
pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
if (!pool2_task)
pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
if (!pool_head_task)
return 0;
pool2_notification = create_pool("notification", sizeof(struct notification), MEM_F_SHARED);
if (!pool2_notification)
pool_head_notification = create_pool("notification", sizeof(struct notification), MEM_F_SHARED);
if (!pool_head_notification)
return 0;
return 1;
}
@ -214,7 +214,7 @@ resume_execution:
goto missing_data;

if (cap[h->index] == NULL)
cap[h->index] = pool_alloc2(h->pool);
cap[h->index] = pool_alloc(h->pool);

if (cap[h->index] == NULL) /* no more capture memory */
continue;
@ -103,7 +103,7 @@ unsigned int var_clear(struct var *var)
size += var->data.u.meth.str.len;
}
LIST_DEL(&var->l);
pool_free2(var_pool, var);
pool_free(var_pool, var);
size += sizeof(struct var);
return size;
}
@ -359,7 +359,7 @@ static int sample_store(struct vars *vars, const char *name, struct sample *smp)
return 0;

/* Create new entry. */
var = pool_alloc2(var_pool);
var = pool_alloc(var_pool);
if (!var)
return 0;
LIST_ADDQ(&vars->head, &var->l);