MAJOR: channel: replace the struct buffer with a pointer to a buffer

With this commit, we now separate the channel from the buffer. This will
allow us to replace buffers on the fly without touching the channel. Since
nobody is supposed to keep a reference to a buffer anymore, doing so is not
a problem and will also permit some copy-less data manipulation.

Interestingly, these changes have shown a 2% performance increase on some
workloads, probably due to a better cache placement of data.
This commit is contained in:
Willy Tarreau 2012-10-12 23:49:43 +02:00
parent f332af7715
commit 9b28e03b66
14 changed files with 353 additions and 313 deletions

View File

@ -28,6 +28,7 @@
#include <common/chunk.h>
#include <common/config.h>
#include <common/memory.h>
struct buffer {
@ -38,7 +39,9 @@ struct buffer {
char data[0]; /* <size> bytes */
};
extern struct pool_head *pool2_buffer;
int init_buffer();
int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int len);
int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);

View File

@ -51,9 +51,9 @@ int bo_getblk(struct channel *chn, char *blk, int len, int offset);
/* Initialize all fields in the channel. */
static inline void channel_init(struct channel *chn)
{
chn->buf.o = 0;
chn->buf.i = 0;
chn->buf.p = chn->buf.data;
chn->buf->o = 0;
chn->buf->i = 0;
chn->buf->p = chn->buf->data;
chn->to_forward = 0;
chn->total = 0;
chn->pipe = NULL;
@ -73,7 +73,7 @@ static inline void channel_init(struct channel *chn)
*/
static inline unsigned int channel_is_empty(struct channel *c)
{
return !(c->buf.o | (long)c->pipe);
return !(c->buf->o | (long)c->pipe);
}
/* Returns non-zero if the buffer input is considered full. The reserved space
@ -83,20 +83,20 @@ static inline unsigned int channel_is_empty(struct channel *c)
*/
static inline int channel_full(const struct channel *chn)
{
int rem = chn->buf.size;
int rem = chn->buf->size;
rem -= chn->buf.o;
rem -= chn->buf.i;
rem -= chn->buf->o;
rem -= chn->buf->i;
if (!rem)
return 1; /* buffer already full */
if (chn->to_forward >= chn->buf.size ||
(CHN_INFINITE_FORWARD < MAX_RANGE(typeof(chn->buf.size)) && // just there to ensure gcc
if (chn->to_forward >= chn->buf->size ||
(CHN_INFINITE_FORWARD < MAX_RANGE(typeof(chn->buf->size)) && // just there to ensure gcc
chn->to_forward == CHN_INFINITE_FORWARD)) // avoids the useless second
return 0; // test whenever possible
rem -= global.tune.maxrewrite;
rem += chn->buf.o;
rem += chn->buf->o;
rem += chn->to_forward;
return rem <= 0;
}
@ -139,10 +139,10 @@ static inline void channel_check_timeouts(struct channel *chn)
*/
static inline void channel_erase(struct channel *chn)
{
chn->buf.o = 0;
chn->buf.i = 0;
chn->buf->o = 0;
chn->buf->i = 0;
chn->to_forward = 0;
chn->buf.p = chn->buf.data;
chn->buf->p = chn->buf->data;
}
/* marks the channel as "shutdown" ASAP for reads */
@ -236,7 +236,7 @@ static inline void channel_dont_read(struct channel *chn)
*/
static inline int buffer_reserved(const struct channel *chn)
{
int ret = global.tune.maxrewrite - chn->to_forward - chn->buf.o;
int ret = global.tune.maxrewrite - chn->to_forward - chn->buf->o;
if (chn->to_forward == CHN_INFINITE_FORWARD)
return 0;
@ -251,7 +251,7 @@ static inline int buffer_reserved(const struct channel *chn)
*/
static inline int buffer_max_len(const struct channel *chn)
{
return chn->buf.size - buffer_reserved(chn);
return chn->buf->size - buffer_reserved(chn);
}
/* Return the amount of bytes that can be written into the buffer at once,
@ -259,7 +259,7 @@ static inline int buffer_max_len(const struct channel *chn)
*/
static inline int buffer_contig_space_res(const struct channel *chn)
{
return buffer_contig_space_with_res(&chn->buf, buffer_reserved(chn));
return buffer_contig_space_with_res(chn->buf, buffer_reserved(chn));
}
/* Returns the amount of space available at the input of the buffer, taking the
@ -269,21 +269,21 @@ static inline int buffer_contig_space_res(const struct channel *chn)
*/
static inline int bi_avail(const struct channel *chn)
{
int rem = chn->buf.size;
int rem = chn->buf->size;
int rem2;
rem -= chn->buf.o;
rem -= chn->buf.i;
rem -= chn->buf->o;
rem -= chn->buf->i;
if (!rem)
return rem; /* buffer already full */
if (chn->to_forward >= chn->buf.size ||
(CHN_INFINITE_FORWARD < MAX_RANGE(typeof(chn->buf.size)) && // just there to ensure gcc
if (chn->to_forward >= chn->buf->size ||
(CHN_INFINITE_FORWARD < MAX_RANGE(typeof(chn->buf->size)) && // just there to ensure gcc
chn->to_forward == CHN_INFINITE_FORWARD)) // avoids the useless second
return rem; // test whenever possible
rem2 = rem - global.tune.maxrewrite;
rem2 += chn->buf.o;
rem2 += chn->buf->o;
rem2 += chn->to_forward;
if (rem > rem2)
@ -300,14 +300,14 @@ static inline int bi_avail(const struct channel *chn)
*/
static inline void bi_erase(struct channel *chn)
{
if (!chn->buf.o)
if (!chn->buf->o)
return channel_erase(chn);
chn->to_forward = 0;
if (!chn->buf.i)
if (!chn->buf->i)
return;
chn->buf.i = 0;
chn->buf->i = 0;
}
/*
@ -319,10 +319,10 @@ static inline void bi_erase(struct channel *chn)
*/
static inline void bo_skip(struct channel *chn, int len)
{
chn->buf.o -= len;
chn->buf->o -= len;
if (buffer_len(&chn->buf) == 0)
chn->buf.p = chn->buf.data;
if (buffer_len(chn->buf) == 0)
chn->buf->p = chn->buf->data;
/* notify that some data was written to the SI from the buffer */
chn->flags |= CF_WRITE_PARTIAL;
@ -374,7 +374,7 @@ static inline int bo_getchr(struct channel *chn)
return -2;
return -1;
}
return *buffer_wrap_sub(&chn->buf, chn->buf.p - chn->buf.o);
return *buffer_wrap_sub(chn->buf, chn->buf->p - chn->buf->o);
}

View File

@ -186,7 +186,7 @@ struct channel {
struct stream_interface *prod; /* producer attached to this channel */
struct stream_interface *cons; /* consumer attached to this channel */
struct pipe *pipe; /* non-NULL only when data present */
struct buffer buf; /* embedded buffer for now, will move */
struct buffer *buf; /* buffer attached to the channel, always present but may move */
};

View File

@ -108,7 +108,7 @@ acl_fetch_req_len(struct proxy *px, struct session *l4, void *l7, unsigned int o
return 0;
smp->type = SMP_T_UINT;
smp->data.uint = l4->req->buf.i;
smp->data.uint = l4->req->buf->i;
smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
return 1;
}
@ -128,8 +128,8 @@ acl_fetch_ssl_hello_type(struct proxy *px, struct session *l4, void *l7, unsigne
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? l4->rep : l4->req;
bleft = chn->buf.i;
data = (const unsigned char *)chn->buf.p;
bleft = chn->buf->i;
data = (const unsigned char *)chn->buf->p;
if (!bleft)
goto too_short;
@ -194,11 +194,11 @@ acl_fetch_req_ssl_ver(struct proxy *px, struct session *l4, void *l7, unsigned i
return 0;
msg_len = 0;
bleft = l4->req->buf.i;
bleft = l4->req->buf->i;
if (!bleft)
goto too_short;
data = (const unsigned char *)l4->req->buf.p;
data = (const unsigned char *)l4->req->buf->p;
if ((*data >= 0x14 && *data <= 0x17) || (*data == 0xFF)) {
/* SSLv3 header format */
if (bleft < 5)
@ -266,8 +266,8 @@ acl_fetch_req_ssl_ver(struct proxy *px, struct session *l4, void *l7, unsigned i
* all the part of the request which fits in a buffer is already
* there.
*/
if (msg_len > buffer_max_len(l4->req) + l4->req->buf.data - l4->req->buf.p)
msg_len = buffer_max_len(l4->req) + l4->req->buf.data - l4->req->buf.p;
if (msg_len > buffer_max_len(l4->req) + l4->req->buf->data - l4->req->buf->p)
msg_len = buffer_max_len(l4->req) + l4->req->buf->data - l4->req->buf->p;
if (bleft < msg_len)
goto too_short;
@ -332,8 +332,8 @@ acl_fetch_ssl_hello_sni(struct proxy *px, struct session *l4, void *l7, unsigned
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? l4->rep : l4->req;
bleft = chn->buf.i;
data = (unsigned char *)chn->buf.p;
bleft = chn->buf->i;
data = (unsigned char *)chn->buf->p;
/* Check for SSL/TLS Handshake */
if (!bleft)

View File

@ -258,11 +258,11 @@ struct server *get_server_ph_post(struct session *s)
struct proxy *px = s->be;
unsigned int plen = px->url_param_len;
unsigned long len = msg->body_len;
const char *params = b_ptr(&req->buf, (int)(msg->sov - req->buf.o));
const char *params = b_ptr(req->buf, (int)(msg->sov - req->buf->o));
const char *p = params;
if (len > buffer_len(&req->buf) - msg->sov)
len = buffer_len(&req->buf) - msg->sov;
if (len > buffer_len(req->buf) - msg->sov)
len = buffer_len(req->buf) - msg->sov;
if (len == 0)
return NULL;
@ -343,7 +343,7 @@ struct server *get_server_hh(struct session *s)
ctx.idx = 0;
/* if the message is chunked, we skip the chunk size, but use the value as len */
http_find_header2(px->hh_name, plen, b_ptr(&s->req->buf, (int)-s->req->buf.o), &txn->hdr_idx, &ctx);
http_find_header2(px->hh_name, plen, b_ptr(s->req->buf, (int)-s->req->buf->o), &txn->hdr_idx, &ctx);
/* if the header is not found or empty, let's fallback to round robin */
if (!ctx.idx || !ctx.vlen)
@ -419,12 +419,12 @@ struct server *get_server_rch(struct session *s)
args[0].data.str.len = px->hh_len;
args[1].type = ARGT_STOP;
b_rew(&s->req->buf, rewind = s->req->buf.o);
b_rew(s->req->buf, rewind = s->req->buf->o);
ret = smp_fetch_rdp_cookie(px, s, NULL, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, args, &smp);
len = smp.data.str.len;
b_adv(&s->req->buf, rewind);
b_adv(s->req->buf, rewind);
if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
return NULL;
@ -569,7 +569,7 @@ int assign_server(struct session *s)
if (s->txn.req.msg_state < HTTP_MSG_BODY)
break;
srv = get_server_uh(s->be,
b_ptr(&s->req->buf, (int)(s->txn.req.sl.rq.u - s->req->buf.o)),
b_ptr(s->req->buf, (int)(s->txn.req.sl.rq.u - s->req->buf->o)),
s->txn.req.sl.rq.u_l);
break;
@ -579,7 +579,7 @@ int assign_server(struct session *s)
break;
srv = get_server_ph(s->be,
b_ptr(&s->req->buf, (int)(s->txn.req.sl.rq.u - s->req->buf.o)),
b_ptr(s->req->buf, (int)(s->txn.req.sl.rq.u - s->req->buf->o)),
s->txn.req.sl.rq.u_l);
if (!srv && s->txn.meth == HTTP_METH_POST)
@ -905,13 +905,13 @@ static void assign_tproxy_address(struct session *s)
((struct sockaddr_in *)&s->req->cons->conn.addr.from)->sin_port = 0;
((struct sockaddr_in *)&s->req->cons->conn.addr.from)->sin_addr.s_addr = 0;
b_rew(&s->req->buf, rewind = s->req->buf.o);
b_rew(s->req->buf, rewind = s->req->buf->o);
if (http_get_hdr(&s->txn.req, srv->bind_hdr_name, srv->bind_hdr_len,
&s->txn.hdr_idx, srv->bind_hdr_occ, NULL, &vptr, &vlen)) {
((struct sockaddr_in *)&s->req->cons->conn.addr.from)->sin_addr.s_addr =
htonl(inetaddr_host_lim(vptr, vptr + vlen));
}
b_adv(&s->req->buf, rewind);
b_adv(s->req->buf, rewind);
}
break;
default:
@ -939,13 +939,13 @@ static void assign_tproxy_address(struct session *s)
((struct sockaddr_in *)&s->req->cons->conn.addr.from)->sin_port = 0;
((struct sockaddr_in *)&s->req->cons->conn.addr.from)->sin_addr.s_addr = 0;
b_rew(&s->req->buf, rewind = s->req->buf.o);
b_rew(s->req->buf, rewind = s->req->buf->o);
if (http_get_hdr(&s->txn.req, s->be->bind_hdr_name, s->be->bind_hdr_len,
&s->txn.hdr_idx, s->be->bind_hdr_occ, NULL, &vptr, &vlen)) {
((struct sockaddr_in *)&s->req->cons->conn.addr.from)->sin_addr.s_addr =
htonl(inetaddr_host_lim(vptr, vptr + vlen));
}
b_adv(&s->req->buf, rewind);
b_adv(s->req->buf, rewind);
}
break;
default:
@ -1144,7 +1144,7 @@ int tcp_persist_rdp_cookie(struct session *s, struct channel *req, int an_bit)
req,
req->rex, req->wex,
req->flags,
req->buf.i,
req->buf->i,
req->analysers);
if (s->flags & SN_ASSIGNED)

View File

@ -15,9 +15,20 @@
#include <common/config.h>
#include <common/buffer.h>
#include <common/memory.h>
#include <types/global.h>
struct pool_head *pool2_buffer;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_buffer()
{
pool2_buffer = create_pool("buffer", sizeof (struct buffer) + global.tune.bufsize, MEM_F_SHARED);
return pool2_buffer != NULL;
}
/* This function writes the string <str> at position <pos> which must be in
* buffer <b>, and moves <end> just after the end of <str>. <b>'s parameters
* <l> and <r> are updated to be valid after the shift. The shift value

View File

@ -18,9 +18,8 @@
#include <common/config.h>
#include <common/memory.h>
#include <common/buffer.h>
#include <proto/channel.h>
#include <types/global.h>
#include <proto/channel.h>
struct pool_head *pool2_channel;
@ -28,7 +27,7 @@ struct pool_head *pool2_channel;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_channel()
{
pool2_channel = create_pool("channel", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
pool2_channel = create_pool("channel", sizeof(struct channel), MEM_F_SHARED);
return pool2_channel != NULL;
}
@ -53,17 +52,17 @@ unsigned long long channel_forward(struct channel *chn, unsigned long long bytes
* once anyway.
*/
if (bytes <= ~0U) {
if (bytes32 <= chn->buf.i) {
if (bytes32 <= chn->buf->i) {
/* OK this amount of bytes might be forwarded at once */
if (!bytes32)
return 0;
b_adv(&chn->buf, bytes32);
b_adv(chn->buf, bytes32);
return bytes;
}
}
forwarded = chn->buf.i;
b_adv(&chn->buf, chn->buf.i);
forwarded = chn->buf->i;
b_adv(chn->buf, chn->buf->i);
/* Note: the case below is the only case where we may return
* a byte count that does not fit into a 32-bit number.
@ -105,7 +104,7 @@ int bo_inject(struct channel *chn, const char *msg, int len)
if (len == 0)
return -1;
if (len > chn->buf.size) {
if (len > chn->buf->size) {
/* we can't write this chunk and will never be able to, because
* it is larger than the buffer. This must be reported as an
* error. Then we return -2 so that writers that don't care can
@ -114,14 +113,14 @@ int bo_inject(struct channel *chn, const char *msg, int len)
return -2;
}
max = buffer_realign(&chn->buf);
max = buffer_realign(chn->buf);
if (len > max)
return max;
memcpy(chn->buf.p, msg, len);
chn->buf.o += len;
chn->buf.p = b_ptr(&chn->buf, len);
memcpy(chn->buf->p, msg, len);
chn->buf->o += len;
chn->buf->p = b_ptr(chn->buf, len);
chn->total += len;
return -1;
}
@ -140,15 +139,15 @@ int bi_putchr(struct channel *chn, char c)
if (channel_full(chn))
return -1;
*bi_end(&chn->buf) = c;
*bi_end(chn->buf) = c;
chn->buf.i++;
chn->buf->i++;
chn->flags |= CF_READ_PARTIAL;
if (chn->to_forward >= 1) {
if (chn->to_forward != CHN_INFINITE_FORWARD)
chn->to_forward--;
b_adv(&chn->buf, 1);
b_adv(chn->buf, 1);
}
chn->total++;
@ -171,7 +170,7 @@ int bi_putblk(struct channel *chn, const char *blk, int len)
return -2;
max = buffer_max_len(chn);
if (unlikely(len > max - buffer_len(&chn->buf))) {
if (unlikely(len > max - buffer_len(chn->buf))) {
/* we can't write this chunk right now because the buffer is
* almost full or because the block is too large. Return the
* available space or -2 if impossible.
@ -186,12 +185,12 @@ int bi_putblk(struct channel *chn, const char *blk, int len)
return 0;
/* OK so the data fits in the buffer in one or two blocks */
max = buffer_contig_space_with_res(&chn->buf, chn->buf.size - max);
memcpy(bi_end(&chn->buf), blk, MIN(len, max));
max = buffer_contig_space_with_res(chn->buf, chn->buf->size - max);
memcpy(bi_end(chn->buf), blk, MIN(len, max));
if (len > max)
memcpy(chn->buf.data, blk + max, len - max);
memcpy(chn->buf->data, blk + max, len - max);
chn->buf.i += len;
chn->buf->i += len;
chn->total += len;
if (chn->to_forward) {
unsigned long fwd = len;
@ -200,7 +199,7 @@ int bi_putblk(struct channel *chn, const char *blk, int len)
fwd = chn->to_forward;
chn->to_forward -= fwd;
}
b_adv(&chn->buf, fwd);
b_adv(chn->buf, fwd);
}
/* notify that some data was read from the SI into the buffer */
@ -233,10 +232,10 @@ int bo_getline(struct channel *chn, char *str, int len)
goto out;
}
p = bo_ptr(&chn->buf);
p = bo_ptr(chn->buf);
if (max > chn->buf.o) {
max = chn->buf.o;
if (max > chn->buf->o) {
max = chn->buf->o;
str[max-1] = 0;
}
while (max) {
@ -246,9 +245,9 @@ int bo_getline(struct channel *chn, char *str, int len)
if (*p == '\n')
break;
p = buffer_wrap_add(&chn->buf, p + 1);
p = buffer_wrap_add(chn->buf, p + 1);
}
if (ret > 0 && ret < len && ret < chn->buf.o &&
if (ret > 0 && ret < len && ret < chn->buf->o &&
*(str-1) != '\n' &&
!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW)))
ret = 0;
@ -273,25 +272,25 @@ int bo_getblk(struct channel *chn, char *blk, int len, int offset)
if (chn->flags & CF_SHUTW)
return -1;
if (len + offset > chn->buf.o) {
if (len + offset > chn->buf->o) {
if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
return -1;
return 0;
}
firstblock = chn->buf.data + chn->buf.size - bo_ptr(&chn->buf);
firstblock = chn->buf->data + chn->buf->size - bo_ptr(chn->buf);
if (firstblock > offset) {
if (firstblock >= len + offset) {
memcpy(blk, bo_ptr(&chn->buf) + offset, len);
memcpy(blk, bo_ptr(chn->buf) + offset, len);
return len;
}
memcpy(blk, bo_ptr(&chn->buf) + offset, firstblock - offset);
memcpy(blk + firstblock - offset, chn->buf.data, len - firstblock + offset);
memcpy(blk, bo_ptr(chn->buf) + offset, firstblock - offset);
memcpy(blk + firstblock - offset, chn->buf->data, len - firstblock + offset);
return len;
}
memcpy(blk, chn->buf.data + offset - firstblock, len);
memcpy(blk, chn->buf->data + offset - firstblock, len);
return len;
}

View File

@ -1462,7 +1462,7 @@ static void cli_io_handler(struct stream_interface *si)
/* ensure we have some output room left in the event we
* would want to return some info right after parsing.
*/
if (buffer_almost_full(&si->ib->buf))
if (buffer_almost_full(si->ib->buf))
break;
reql = bo_getline(si->ob, trash, trashlen);
@ -1576,7 +1576,7 @@ static void cli_io_handler(struct stream_interface *si)
* buffer is empty. This still allows pipelined requests
* to be sent in non-interactive mode.
*/
if ((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) || (!si->applet.st1 && !req->buf.o)) {
if ((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) || (!si->applet.st1 && !req->buf->o)) {
si->applet.st0 = STAT_CLI_END;
continue;
}
@ -1618,7 +1618,7 @@ static void cli_io_handler(struct stream_interface *si)
out:
DPRINTF(stderr, "%s@%d: st=%d, rqf=%x, rpf=%x, rqh=%d, rqs=%d, rh=%d, rs=%d\n",
__FUNCTION__, __LINE__,
si->state, req->flags, res->flags, req->buf.i, req->buf.o, res->buf.i, res->buf.o);
si->state, req->flags, res->flags, req->buf->i, req->buf->o, res->buf->i, res->buf->o);
if (unlikely(si->state == SI_ST_DIS || si->state == SI_ST_CLO)) {
/* check that we have released everything then unregister */
@ -2178,7 +2178,7 @@ static int stats_dump_http(struct stream_interface *si, struct uri_auth *uri)
case STAT_ST_LIST:
/* dump proxies */
while (si->applet.ctx.stats.px) {
if (buffer_almost_full(&rep->buf))
if (buffer_almost_full(rep->buf))
return 0;
px = si->applet.ctx.stats.px;
/* skip the disabled proxies, global frontend and non-networked ones */
@ -2494,7 +2494,7 @@ static int stats_dump_proxy(struct stream_interface *si, struct proxy *px, struc
case STAT_PX_ST_LI:
/* stats.l has been initialized above */
for (; si->applet.ctx.stats.l != &px->conf.listeners; si->applet.ctx.stats.l = l->by_fe.n) {
if (buffer_almost_full(&rep->buf))
if (buffer_almost_full(rep->buf))
return 0;
l = LIST_ELEM(si->applet.ctx.stats.l, struct listener *, by_fe);
@ -2632,7 +2632,7 @@ static int stats_dump_proxy(struct stream_interface *si, struct proxy *px, struc
for (; si->applet.ctx.stats.sv != NULL; si->applet.ctx.stats.sv = sv->next) {
int sv_state; /* 0=DOWN, 1=going up, 2=going down, 3=UP, 4,5=NOLB, 6=unchecked */
if (buffer_almost_full(&rep->buf))
if (buffer_almost_full(rep->buf))
return 0;
sv = si->applet.ctx.stats.sv;
@ -3453,7 +3453,7 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si)
" an_exp=%s",
sess->req,
sess->req->flags, sess->req->analysers,
sess->req->buf.i, sess->req->buf.o,
sess->req->buf->i, sess->req->buf->o,
sess->req->pipe ? sess->req->pipe->data : 0,
sess->req->to_forward,
sess->req->analyse_exp ?
@ -3472,8 +3472,8 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si)
sess->req->wex ?
human_time(TICKS_TO_MS(sess->req->wex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>",
sess->req->buf.data,
(int)(sess->req->buf.p - sess->req->buf.data),
sess->req->buf->data,
(int)(sess->req->buf->p - sess->req->buf->data),
sess->txn.req.next,
sess->req->total);
@ -3482,7 +3482,7 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si)
" an_exp=%s",
sess->rep,
sess->rep->flags, sess->rep->analysers,
sess->rep->buf.i, sess->rep->buf.o,
sess->rep->buf->i, sess->rep->buf->o,
sess->rep->pipe ? sess->rep->pipe->data : 0,
sess->rep->to_forward,
sess->rep->analyse_exp ?
@ -3501,8 +3501,8 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si)
sess->rep->wex ?
human_time(TICKS_TO_MS(sess->rep->wex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>",
sess->rep->buf.data,
(int)(sess->rep->buf.p - sess->rep->buf.data),
sess->rep->buf->data,
(int)(sess->rep->buf->p - sess->rep->buf->data),
sess->txn.rsp.next,
sess->rep->total);
@ -3623,7 +3623,7 @@ static int stats_dump_sess_to_buffer(struct stream_interface *si)
chunk_printf(&msg,
" rq[f=%06xh,i=%d,an=%02xh,rx=%s",
curr_sess->req->flags,
curr_sess->req->buf.i,
curr_sess->req->buf->i,
curr_sess->req->analysers,
curr_sess->req->rex ?
human_time(TICKS_TO_MS(curr_sess->req->rex - now_ms),
@ -3644,7 +3644,7 @@ static int stats_dump_sess_to_buffer(struct stream_interface *si)
chunk_printf(&msg,
" rp[f=%06xh,i=%d,an=%02xh,rx=%s",
curr_sess->rep->flags,
curr_sess->rep->buf.i,
curr_sess->rep->buf->i,
curr_sess->rep->analysers,
curr_sess->rep->rex ?
human_time(TICKS_TO_MS(curr_sess->rep->rex - now_ms),

View File

@ -628,6 +628,7 @@ void init(int argc, char **argv)
/* now we know the buffer size, we can initialize the channels and buffers */
init_channel();
init_buffer();
if (have_appsession)
appsession_init();
@ -1112,6 +1113,7 @@ void deinit(void)
}
pool_destroy2(pool2_session);
pool_destroy2(pool2_buffer);
pool_destroy2(pool2_channel);
pool_destroy2(pool2_requri);
pool_destroy2(pool2_task);

View File

@ -1221,7 +1221,10 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
if ((s->req = pool_alloc2(pool2_channel)) == NULL)
goto out_fail_req; /* no memory */
s->req->buf.size = global.tune.bufsize;
if ((s->req->buf = pool_alloc2(pool2_buffer)) == NULL)
goto out_fail_req_buf; /* no memory */
s->req->buf->size = global.tune.bufsize;
channel_init(s->req);
s->req->prod = &s->si[0];
s->req->cons = &s->si[1];
@ -1244,7 +1247,10 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
if ((s->rep = pool_alloc2(pool2_channel)) == NULL)
goto out_fail_rep; /* no memory */
s->rep->buf.size = global.tune.bufsize;
if ((s->rep->buf = pool_alloc2(pool2_buffer)) == NULL)
goto out_fail_rep_buf; /* no memory */
s->rep->buf->size = global.tune.bufsize;
channel_init(s->rep);
s->rep->prod = &s->si[1];
s->rep->cons = &s->si[0];
@ -1278,7 +1284,11 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
return s;
/* Error unrolling */
out_fail_rep_buf:
pool_free2(pool2_channel, s->rep);
out_fail_rep:
pool_free2(pool2_buffer, s->req->buf);
out_fail_req_buf:
pool_free2(pool2_channel, s->req);
out_fail_req:
task_free(t);

File diff suppressed because it is too large Load Diff

View File

@ -792,7 +792,7 @@ int tcp_inspect_request(struct session *s, struct channel *req, int an_bit)
req,
req->rex, req->wex,
req->flags,
req->buf.i,
req->buf->i,
req->analysers);
/* We don't know whether we have enough data, so must proceed
@ -805,7 +805,7 @@ int tcp_inspect_request(struct session *s, struct channel *req, int an_bit)
* - if one rule returns KO, then return KO
*/
if ((req->flags & CF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite) ||
if ((req->flags & CF_SHUTR) || buffer_full(req->buf, global.tune.maxrewrite) ||
!s->be->tcp_req.inspect_delay || tick_is_expired(req->analyse_exp, now_ms))
partial = SMP_OPT_FINAL;
else
@ -911,7 +911,7 @@ int tcp_inspect_response(struct session *s, struct channel *rep, int an_bit)
rep,
rep->rex, rep->wex,
rep->flags,
rep->buf.i,
rep->buf->i,
rep->analysers);
/* We don't know whether we have enough data, so must proceed
@ -1400,11 +1400,11 @@ smp_fetch_rdp_cookie(struct proxy *px, struct session *l4, void *l7, unsigned in
smp->flags = 0;
smp->type = SMP_T_CSTR;
bleft = l4->req->buf.i;
bleft = l4->req->buf->i;
if (bleft <= 11)
goto too_short;
data = (const unsigned char *)l4->req->buf.p + 11;
data = (const unsigned char *)l4->req->buf->p + 11;
bleft -= 11;
if (bleft <= 7)
@ -1595,11 +1595,11 @@ smp_fetch_payload_lv(struct proxy *px, struct session *l4, void *l7, unsigned in
if (!chn)
return 0;
if (len_offset + len_size > chn->buf.i)
if (len_offset + len_size > chn->buf->i)
goto too_short;
for (i = 0; i < len_size; i++) {
buf_size = (buf_size << 8) + ((unsigned char *)chn->buf.p)[i + len_offset];
buf_size = (buf_size << 8) + ((unsigned char *)chn->buf->p)[i + len_offset];
}
/* buf offset may be implicit, absolute or relative */
@ -1609,18 +1609,18 @@ smp_fetch_payload_lv(struct proxy *px, struct session *l4, void *l7, unsigned in
else if (arg_p[2].type == ARGT_SINT)
buf_offset += arg_p[2].data.sint;
if (!buf_size || buf_size > chn->buf.size || buf_offset + buf_size > chn->buf.size) {
if (!buf_size || buf_size > chn->buf->size || buf_offset + buf_size > chn->buf->size) {
/* will never match */
smp->flags = 0;
return 0;
}
if (buf_offset + buf_size > chn->buf.i)
if (buf_offset + buf_size > chn->buf->i)
goto too_short;
/* init chunk as read only */
smp->type = SMP_T_CBIN;
chunk_initlen(&smp->data.str, chn->buf.p + buf_offset, 0, buf_size);
chunk_initlen(&smp->data.str, chn->buf->p + buf_offset, 0, buf_size);
smp->flags = SMP_F_VOLATILE;
return 1;
@ -1645,18 +1645,18 @@ smp_fetch_payload(struct proxy *px, struct session *l4, void *l7, unsigned int o
if (!chn)
return 0;
if (!buf_size || buf_size > chn->buf.size || buf_offset + buf_size > chn->buf.size) {
if (!buf_size || buf_size > chn->buf->size || buf_offset + buf_size > chn->buf->size) {
/* will never match */
smp->flags = 0;
return 0;
}
if (buf_offset + buf_size > chn->buf.i)
if (buf_offset + buf_size > chn->buf->i)
goto too_short;
/* init chunk as read only */
smp->type = SMP_T_CBIN;
chunk_initlen(&smp->data.str, chn->buf.p + buf_offset, 0, buf_size);
chunk_initlen(&smp->data.str, chn->buf->p + buf_offset, 0, buf_size);
smp->flags = SMP_F_VOLATILE;
return 1;

View File

@ -15,6 +15,7 @@
#include <fcntl.h>
#include <common/config.h>
#include <common/buffer.h>
#include <common/debug.h>
#include <common/memory.h>
@ -419,11 +420,17 @@ int session_complete(struct session *s)
if (unlikely((s->req = pool_alloc2(pool2_channel)) == NULL))
goto out_free_task; /* no memory */
if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
if (unlikely((s->req->buf = pool_alloc2(pool2_buffer)) == NULL))
goto out_free_req; /* no memory */
if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
goto out_free_req_buf; /* no memory */
if (unlikely((s->rep->buf = pool_alloc2(pool2_buffer)) == NULL))
goto out_free_rep; /* no memory */
/* initialize the request buffer */
s->req->buf.size = global.tune.bufsize;
s->req->buf->size = global.tune.bufsize;
channel_init(s->req);
s->req->prod = &s->si[0];
s->req->cons = &s->si[1];
@ -440,7 +447,7 @@ int session_complete(struct session *s)
s->req->analyse_exp = TICK_ETERNITY;
/* initialize response buffer */
s->rep->buf.size = global.tune.bufsize;
s->rep->buf->size = global.tune.bufsize;
channel_init(s->rep);
s->rep->prod = &s->si[1];
s->rep->cons = &s->si[0];
@ -485,7 +492,7 @@ int session_complete(struct session *s)
* finished (=0, eg: monitoring), in both situations,
* we can release everything and close.
*/
goto out_free_rep;
goto out_free_rep_buf;
}
/* if logs require transport layer information, note it on the connection */
@ -503,8 +510,12 @@ int session_complete(struct session *s)
return 1;
/* Error unrolling */
out_free_rep_buf:
pool_free2(pool2_buffer, s->rep->buf);
out_free_rep:
pool_free2(pool2_channel, s->rep);
out_free_req_buf:
pool_free2(pool2_buffer, s->req->buf);
out_free_req:
pool_free2(pool2_channel, s->req);
out_free_task:
@ -547,6 +558,9 @@ static void session_free(struct session *s)
if (s->rep->pipe)
put_pipe(s->rep->pipe);
pool_free2(pool2_buffer, s->req->buf);
pool_free2(pool2_buffer, s->rep->buf);
pool_free2(pool2_channel, s->req);
pool_free2(pool2_channel, s->rep);
@ -587,6 +601,7 @@ static void session_free(struct session *s)
/* We may want to free the maximum amount of pools if the proxy is stopping */
if (fe && unlikely(fe->state == PR_STSTOPPED)) {
pool_flush2(pool2_buffer);
pool_flush2(pool2_channel);
pool_flush2(pool2_hdr_idx);
pool_flush2(pool2_requri);
@ -915,7 +930,7 @@ static void sess_update_stream_int(struct session *s, struct stream_interface *s
s->req, s->rep,
s->req->rex, s->rep->wex,
s->req->flags, s->rep->flags,
s->req->buf.i, s->req->buf.o, s->rep->buf.i, s->rep->buf.o, s->rep->cons->state, s->req->cons->state);
s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state);
if (si->state == SI_ST_ASS) {
/* Server assigned to connection request, we have to try to connect now */
@ -1103,7 +1118,7 @@ static void sess_prepare_conn_req(struct session *s, struct stream_interface *si
s->req, s->rep,
s->req->rex, s->rep->wex,
s->req->flags, s->rep->flags,
s->req->buf.i, s->req->buf.o, s->rep->buf.i, s->rep->buf.o, s->rep->cons->state, s->req->cons->state);
s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state);
if (si->state != SI_ST_REQ)
return;
@ -1152,7 +1167,7 @@ static int process_switching_rules(struct session *s, struct channel *req, int a
req,
req->rex, req->wex,
req->flags,
req->buf.i,
req->buf->i,
req->analysers);
/* now check whether we have some switching rules for this request */
@ -1247,7 +1262,7 @@ static int process_server_rules(struct session *s, struct channel *req, int an_b
req,
req->rex, req->wex,
req->flags,
req->buf.i + req->buf.o,
req->buf->i + req->buf->o,
req->analysers);
if (!(s->flags & SN_ASSIGNED)) {
@ -1296,7 +1311,7 @@ static int process_sticking_rules(struct session *s, struct channel *req, int an
req,
req->rex, req->wex,
req->flags,
req->buf.i,
req->buf->i,
req->analysers);
list_for_each_entry(rule, &px->sticking_rules, list) {
@ -1386,7 +1401,7 @@ static int process_store_rules(struct session *s, struct channel *rep, int an_bi
rep,
rep->rex, rep->wex,
rep->flags,
rep->buf.i,
rep->buf->i,
rep->analysers);
list_for_each_entry(rule, &px->storersp_rules, list) {
@ -1633,7 +1648,7 @@ struct task *process_session(struct task *t)
s->req, s->rep,
s->req->rex, s->rep->wex,
s->req->flags, s->rep->flags,
s->req->buf.i, s->req->buf.o, s->rep->buf.i, s->rep->buf.o, s->rep->cons->state, s->req->cons->state,
s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state,
s->rep->cons->err_type, s->req->cons->err_type,
s->req->cons->conn_retries);
@ -2036,7 +2051,7 @@ struct task *process_session(struct task *t)
channel_auto_read(s->req);
channel_auto_connect(s->req);
channel_auto_close(s->req);
buffer_flush(&s->req->buf);
buffer_flush(s->req->buf);
/* We'll let data flow between the producer (if still connected)
* to the consumer (which might possibly not be connected yet).
@ -2172,7 +2187,7 @@ struct task *process_session(struct task *t)
*/
channel_auto_read(s->rep);
channel_auto_close(s->rep);
buffer_flush(&s->rep->buf);
buffer_flush(s->rep->buf);
/* We'll let data flow between the producer (if still connected)
* to the consumer.

View File

@ -617,7 +617,7 @@ static int si_conn_wake_cb(struct connection *conn)
*/
if (((si->ib->flags & CF_READ_PARTIAL) && !channel_is_empty(si->ib)) &&
(si->ib->pipe /* always try to send spliced data */ ||
(si->ib->buf.i == 0 && (si->ib->cons->flags & SI_FL_WAIT_DATA)))) {
(si->ib->buf->i == 0 && (si->ib->cons->flags & SI_FL_WAIT_DATA)))) {
int last_len = si->ib->pipe ? si->ib->pipe->data : 0;
si_chk_snd(si->ib->cons);
@ -691,7 +691,7 @@ static int si_conn_send_loop(struct connection *conn)
/* At this point, the pipe is empty, but we may still have data pending
* in the normal buffer.
*/
if (!chn->buf.o)
if (!chn->buf->o)
return 0;
/* when we're in this loop, we already know that there is no spliced
@ -716,13 +716,13 @@ static int si_conn_send_loop(struct connection *conn)
((chn->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK)) == CF_SHUTW_NOW))
send_flag |= MSG_MORE;
ret = conn->xprt->snd_buf(conn, &chn->buf, send_flag);
ret = conn->xprt->snd_buf(conn, chn->buf, send_flag);
if (ret <= 0)
break;
chn->flags |= CF_WRITE_PARTIAL;
if (!chn->buf.o) {
if (!chn->buf->o) {
/* Always clear both flags once everything has been sent, they're one-shot */
chn->flags &= ~(CF_EXPECT_MORE | CF_SEND_DONTWAIT);
break;
@ -970,7 +970,7 @@ static void si_conn_recv_cb(struct connection *conn)
*/
if (conn->xprt->rcv_pipe &&
chn->to_forward >= MIN_SPLICE_FORWARD && chn->flags & CF_KERN_SPLICING) {
if (buffer_not_empty(&chn->buf)) {
if (buffer_not_empty(chn->buf)) {
/* We're embarrassed, there are already data pending in
* the buffer and we don't want to have them at two
* locations at a time. Let's indicate we need some
@ -1028,7 +1028,7 @@ static void si_conn_recv_cb(struct connection *conn)
break;
}
ret = conn->xprt->rcv_buf(conn, &chn->buf, max);
ret = conn->xprt->rcv_buf(conn, chn->buf, max);
if (ret <= 0)
break;
@ -1042,7 +1042,7 @@ static void si_conn_recv_cb(struct connection *conn)
fwd = chn->to_forward;
chn->to_forward -= fwd;
}
b_adv(&chn->buf, fwd);
b_adv(chn->buf, fwd);
}
chn->flags |= CF_READ_PARTIAL;
@ -1052,7 +1052,7 @@ static void si_conn_recv_cb(struct connection *conn)
/* The buffer is now full, there's no point in going through
* the loop again.
*/
if (!(chn->flags & CF_STREAMER_FAST) && (cur_read == buffer_len(&chn->buf))) {
if (!(chn->flags & CF_STREAMER_FAST) && (cur_read == buffer_len(chn->buf))) {
chn->xfer_small = 0;
chn->xfer_large++;
if (chn->xfer_large >= 3) {
@ -1064,7 +1064,7 @@ static void si_conn_recv_cb(struct connection *conn)
}
}
else if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
(cur_read <= chn->buf.size / 2)) {
(cur_read <= chn->buf->size / 2)) {
chn->xfer_large = 0;
chn->xfer_small++;
if (chn->xfer_small >= 2) {
@ -1094,7 +1094,7 @@ static void si_conn_recv_cb(struct connection *conn)
*/
if (ret < max) {
if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
(cur_read <= chn->buf.size / 2)) {
(cur_read <= chn->buf->size / 2)) {
chn->xfer_large = 0;
chn->xfer_small++;
if (chn->xfer_small >= 3) {