BUG/MEDIUM: mux-h1: Fix how timeouts are applied on H1 connections
There were several flaws in the way the different timeouts were applied on H1 connections. First, the H1C task handling timeouts was not created if no client/server timeout was specified. But there are other timeouts to consider. First, the client-fin/server-fin timeouts. But for frontend connections, the http-keep-alive and http-request timeouts may also be used. And finally, on soft-stop, the close-spread-time value must be considered too. So in the end, it is probably easier to always create a task to manage H1C timeouts, especially since the client/server timeouts are most often set.

Then, the expiration date of the H1C's task must only be updated if the considered timeout is set. So tick_add_ifset() must be used instead of tick_add(). Otherwise, if a timeout is undefined, the task may expire immediately while it should in fact never expire.

Finally, the idle expiration date must only be considered for idle connections.

This patch should be backported to all stable versions, at least as far as 2.6. On 2.4, it will have to be slightly adapted for the idle_exp part. On 2.2 and 2.0, the patch will have to be rewritten because h1_refresh_timeout() is quite different there.

(cherry picked from commit 3c09b34325a073e2c110e046f9705b2fddfa91c5)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
parent af94845eb5
commit 127462ca1c
changed: src/mux_h1.c (30)
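To make the tick_add()/tick_add_ifset() point in the message concrete, here is a minimal standalone sketch of the tick conventions involved. It is an illustrative reimplementation, not the upstream include/haproxy/ticks.h code; it only assumes the documented convention that an unset tick is TICK_ETERNITY, i.e. 0:

    #include <stdio.h>

    #define TICK_ETERNITY 0

    /* a tick is set unless it holds the TICK_ETERNITY value */
    static inline int tick_isset(int expire)
    {
        return expire != 0;
    }

    /* unconditionally computes a deadline: an unset timeout (0) yields "now",
     * i.e. an immediately expired task -- this is the bug being fixed
     */
    static inline int tick_add(int now, int timeout)
    {
        int t = now + timeout;
        if (!t)
            t++; /* never collide with TICK_ETERNITY */
        return t;
    }

    /* returns TICK_ETERNITY when the timeout is not set, so the task never fires */
    static inline int tick_add_ifset(int now, int timeout)
    {
        if (!tick_isset(timeout))
            return TICK_ETERNITY;
        return tick_add(now, timeout);
    }

    int main(void)
    {
        int now_ms = 1000;
        int unset = TICK_ETERNITY; /* e.g. no "timeout client" configured */

        printf("tick_add:       %d\n", tick_add(now_ms, unset));       /* 1000: expires right away */
        printf("tick_add_ifset: %d\n", tick_add_ifset(now_ms, unset)); /* 0: never expires */
        return 0;
    }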
@@ -647,24 +647,24 @@ static void h1_refresh_timeout(struct h1c *h1c)
 		 * timeouts so that we don't hang too long on clients that have
 		 * gone away (especially in tunnel mode).
 		 */
-		h1c->task->expire = tick_add(now_ms, h1c->shut_timeout);
+		h1c->task->expire = tick_add_ifset(now_ms, h1c->shut_timeout);
 		TRACE_DEVEL("refreshing connection's timeout (dead or half-closed)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
 		is_idle_conn = 1;
 	}
 	else if (b_data(&h1c->obuf)) {
 		/* alive connection with pending outgoing data, need a timeout (server or client). */
-		h1c->task->expire = tick_add(now_ms, h1c->timeout);
+		h1c->task->expire = tick_add_ifset(now_ms, h1c->timeout);
 		TRACE_DEVEL("refreshing connection's timeout (pending outgoing data)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
 	}
 	else if (!(h1c->flags & H1C_F_IS_BACK) && (h1c->state == H1_CS_IDLE)) {
 		/* idle front connections. */
-		h1c->task->expire = (tick_isset(h1c->idle_exp) ? h1c->idle_exp : tick_add(now_ms, h1c->timeout));
+		h1c->task->expire = (tick_isset(h1c->idle_exp) ? h1c->idle_exp : tick_add_ifset(now_ms, h1c->timeout));
 		TRACE_DEVEL("refreshing connection's timeout (idle front h1c)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
 		is_idle_conn = 1;
 	}
 	else if (!(h1c->flags & H1C_F_IS_BACK) && (h1c->state != H1_CS_RUNNING)) {
 		/* alive front connections waiting for a fully usable stream need a timeout. */
-		h1c->task->expire = tick_add(now_ms, h1c->timeout);
+		h1c->task->expire = tick_first(h1c->idle_exp, tick_add_ifset(now_ms, h1c->timeout));
 		TRACE_DEVEL("refreshing connection's timeout (alive front h1c but not ready)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
 		/* A frontend connection not yet ready could be treated the same way as an idle
 		 * one in case of soft-close.
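The last branch of this hunk now combines the idle expiration date with the configured timeout through tick_first(). As a rough sketch of its semantics, reusing the helpers from the sketch above (the real helper compares ticks with a wrap-around-aware tick_is_lt(), not a plain '<'):

    /* earliest of two deadlines; an unset tick (TICK_ETERNITY) never wins,
     * so tick_first(idle_exp, tick_add_ifset(now_ms, timeout)) arms whichever
     * deadline exists, and the earliest one when both do
     */
    static inline int tick_first(int t1, int t2)
    {
        if (!tick_isset(t1))
            return t2;
        if (!tick_isset(t2))
            return t1;
        return (t1 < t2) ? t1 : t2;
    }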
@@ -677,9 +677,6 @@ static void h1_refresh_timeout(struct h1c *h1c)
 		TRACE_DEVEL("no connection timeout (alive back h1c or front h1c with an SC)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
 	}
 
-	/* Finally set the idle expiration date if shorter */
-	h1c->task->expire = tick_first(h1c->task->expire, h1c->idle_exp);
-
 	if ((h1c->px->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
 	    is_idle_conn && tick_isset(global.close_spread_end)) {
 		/* If a soft-stop is in progress and a close-spread-time
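For context on the close-spread-time block right after this hunk: on soft-stop, idle connections are given a deadline picked at random inside the remaining spread window, so they do not all close at the same instant. A rough sketch of that idea, with a hypothetical helper name (the real code uses HAProxy's internal PRNG and tick helpers):

    #include <stdlib.h>

    /* hypothetical illustration: close at a random point inside the remaining
     * close-spread window instead of immediately
     */
    static int spread_close_deadline(int now_ms, int close_spread_end)
    {
        int window = close_spread_end - now_ms;

        if (window <= 0)
            return now_ms;                 /* window elapsed: close now */
        return now_ms + (rand() % window); /* random point inside the window */
    }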
@@ -1202,19 +1199,16 @@ static int h1_init(struct connection *conn, struct proxy *proxy, struct session
 		LIST_APPEND(&mux_stopping_data[tid].list,
 			    &h1c->conn->stopping_list);
 	}
-	if (tick_isset(h1c->timeout)) {
-		t = task_new_here();
-		if (!t) {
-			TRACE_ERROR("H1C task allocation failure", H1_EV_H1C_NEW|H1_EV_H1C_END|H1_EV_H1C_ERR);
-			goto fail;
-		}
-
-		h1c->task = t;
-		t->process = h1_timeout_task;
-		t->context = h1c;
-
-		t->expire = tick_add(now_ms, h1c->timeout);
-	}
+	t = task_new_here();
+	if (!t) {
+		TRACE_ERROR("H1C task allocation failure", H1_EV_H1C_NEW|H1_EV_H1C_END|H1_EV_H1C_ERR);
+		goto fail;
+	}
+
+	h1c->task = t;
+	t->process = h1_timeout_task;
+	t->context = h1c;
+	t->expire = tick_add_ifset(now_ms, h1c->timeout);
 
 	conn->ctx = h1c;
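Taken together, the H1C task is now always created, and an all-unset configuration simply leaves it with expire == TICK_ETERNITY, so it never fires, while any single configured deadline is enough to arm it. A small usage sketch with hypothetical values, reusing the helpers from the sketches above:

    int main(void)
    {
        int now_ms = 1000;

        /* nothing configured: the task exists but never expires */
        int expire = tick_add_ifset(now_ms, TICK_ETERNITY); /* == 0 */

        /* only an idle deadline (e.g. http-request) is set: it wins */
        int idle_exp = now_ms + 5000;
        expire = tick_first(idle_exp, tick_add_ifset(now_ms, TICK_ETERNITY)); /* == 6000 */

        return expire != 6000; /* exit 0 when the example behaves as expected */
    }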