CLEANUP: conn_stream: apply cs_endp_flags.cocci tree-wide
This changes all main uses of cs->endp->flags to their sc_ep_*() equivalent by applying the coccinelle script cs_endp_flags.cocci. Note: 143 locations were touched and manually reviewed; all were found to be OK except a single one in cs_reset_endp(), where the flags are read and filtered to be used as-is rather than as a boolean, hence it was replaced with sc_ep_get() & $FLAGS. The script was applied with all includes:

spatch --in-place --recursive-includes -I include --sp-file $script $files
This commit is contained in: parent 87b60b2c9d, commit 0cfcc40812
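For readers who have not yet seen the new accessors, the sketch below illustrates what the sc_ep_get()/sc_ep_set()/sc_ep_clr()/sc_ep_test() helpers boil down to, inferred from how they are used in this diff. The structure definitions are simplified stand-ins, not the real HAProxy declarations, and the actual helper definitions in the tree may differ in detail.

/* Illustrative sketch only: minimal stand-ins for the real HAProxy
 * structures, just enough to make the helpers below self-contained.
 */
struct cs_endpoint { unsigned int flags; };
struct conn_stream { struct cs_endpoint *endp; };

/* Read the whole endpoint flag word (used where the raw value is needed,
 * e.g. sc_ep_get(cs) & CS_EP_APP_MASK in cs_reset_endp()). */
static inline unsigned int sc_ep_get(const struct conn_stream *cs)
{
	return cs->endp->flags;
}

/* Set one or more CS_EP_* flags on the endpoint. */
static inline void sc_ep_set(struct conn_stream *cs, unsigned int flags)
{
	cs->endp->flags |= flags;
}

/* Clear one or more CS_EP_* flags on the endpoint. */
static inline void sc_ep_clr(struct conn_stream *cs, unsigned int flags)
{
	cs->endp->flags &= ~flags;
}

/* Test one or more CS_EP_* flags; non-zero if any of them is set. */
static inline unsigned int sc_ep_test(const struct conn_stream *cs, unsigned int flags)
{
	return cs->endp->flags & flags;
}

With these definitions, a call like sc_ep_clr(cs, ~CS_EP_DETACHED) clears everything except CS_EP_DETACHED, which is why it is equivalent to the old cs->endp->flags &= CS_EP_DETACHED form seen in the hunks below.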
@@ -140,7 +140,7 @@ static inline struct connection *__cs_conn(const struct conn_stream *cs)
}
static inline struct connection *cs_conn(const struct conn_stream *cs)
{
-if (cs->endp->flags & CS_EP_T_MUX)
+if (sc_ep_test(cs, CS_EP_T_MUX))
return __cs_conn(cs);
return NULL;
}
@@ -165,7 +165,7 @@ static inline void *__cs_mux(const struct conn_stream *cs)
}
static inline void *cs_mux(const struct conn_stream *cs)
{
-if (cs->endp->flags & CS_EP_T_MUX)
+if (sc_ep_test(cs, CS_EP_T_MUX))
return __cs_mux(cs);
return NULL;
}
@@ -180,7 +180,7 @@ static inline struct appctx *__cs_appctx(const struct conn_stream *cs)
}
static inline struct appctx *cs_appctx(const struct conn_stream *cs)
{
-if (cs->endp->flags & CS_EP_T_APPLET)
+if (sc_ep_test(cs, CS_EP_T_APPLET))
return __cs_appctx(cs);
return NULL;
}
@@ -229,14 +229,14 @@ static inline void cs_conn_shutr(struct conn_stream *cs, enum co_shr_mode mode)

BUG_ON(!cs_conn(cs));

-if (cs->endp->flags & CS_EP_SHR)
+if (sc_ep_test(cs, CS_EP_SHR))
return;

/* clean data-layer shutdown */
mux = cs_conn_mux(cs);
if (mux && mux->shutr)
mux->shutr(cs, mode);
-cs->endp->flags |= (mode == CO_SHR_DRAIN) ? CS_EP_SHRD : CS_EP_SHRR;
+sc_ep_set(cs, (mode == CO_SHR_DRAIN) ? CS_EP_SHRD : CS_EP_SHRR);
}

/* shut write */
@@ -246,14 +246,14 @@ static inline void cs_conn_shutw(struct conn_stream *cs, enum co_shw_mode mode)

BUG_ON(!cs_conn(cs));

-if (cs->endp->flags & CS_EP_SHW)
+if (sc_ep_test(cs, CS_EP_SHW))
return;

/* clean data-layer shutdown */
mux = cs_conn_mux(cs);
if (mux && mux->shutw)
mux->shutw(cs, mode);
-cs->endp->flags |= (mode == CO_SHW_NORMAL) ? CS_EP_SHWN : CS_EP_SHWS;
+sc_ep_set(cs, (mode == CO_SHW_NORMAL) ? CS_EP_SHWN : CS_EP_SHWS);
}

/* completely close a conn_stream (but do not detach it) */
@@ -301,7 +301,7 @@ static inline struct conn_stream *cs_conn_get_first(const struct connection *con
/* Returns non-zero if the conn-stream's Rx path is blocked */
static inline int cs_rx_blocked(const struct conn_stream *cs)
{
-return !!(cs->endp->flags & CS_EP_RXBLK_ANY);
+return !!sc_ep_test(cs, CS_EP_RXBLK_ANY);
}

@@ -310,55 +310,55 @@ static inline int cs_rx_blocked(const struct conn_stream *cs)
*/
static inline int cs_rx_blocked_room(const struct conn_stream *cs)
{
-return !!(cs->endp->flags & CS_EP_RXBLK_ROOM);
+return !!sc_ep_test(cs, CS_EP_RXBLK_ROOM);
}

/* Returns non-zero if the conn-stream's endpoint is ready to receive */
static inline int cs_rx_endp_ready(const struct conn_stream *cs)
{
-return !(cs->endp->flags & CS_EP_RX_WAIT_EP);
+return !sc_ep_test(cs, CS_EP_RX_WAIT_EP);
}

/* The conn-stream announces it is ready to try to deliver more data to the input buffer */
static inline void cs_rx_endp_more(struct conn_stream *cs)
{
-cs->endp->flags &= ~CS_EP_RX_WAIT_EP;
+sc_ep_clr(cs, CS_EP_RX_WAIT_EP);
}

/* The conn-stream announces it doesn't have more data for the input buffer */
static inline void cs_rx_endp_done(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_RX_WAIT_EP;
+sc_ep_set(cs, CS_EP_RX_WAIT_EP);
}

/* Tell a conn-stream the input channel is OK with it sending it some data */
static inline void cs_rx_chan_rdy(struct conn_stream *cs)
{
-cs->endp->flags &= ~CS_EP_RXBLK_CHAN;
+sc_ep_clr(cs, CS_EP_RXBLK_CHAN);
}

/* Tell a conn-stream the input channel is not OK with it sending it some data */
static inline void cs_rx_chan_blk(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_RXBLK_CHAN;
+sc_ep_set(cs, CS_EP_RXBLK_CHAN);
}

/* Tell a conn-stream the other side is connected */
static inline void cs_rx_conn_rdy(struct conn_stream *cs)
{
-cs->endp->flags &= ~CS_EP_RXBLK_CONN;
+sc_ep_clr(cs, CS_EP_RXBLK_CONN);
}

/* Tell a conn-stream it must wait for the other side to connect */
static inline void cs_rx_conn_blk(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_RXBLK_CONN;
+sc_ep_set(cs, CS_EP_RXBLK_CONN);
}

/* The conn-stream just got the input buffer it was waiting for */
static inline void cs_rx_buff_rdy(struct conn_stream *cs)
{
-cs->endp->flags &= ~CS_EP_RXBLK_BUFF;
+sc_ep_clr(cs, CS_EP_RXBLK_BUFF);
}

/* The conn-stream failed to get an input buffer and is waiting for it.
@@ -368,13 +368,13 @@ static inline void cs_rx_buff_rdy(struct conn_stream *cs)
*/
static inline void cs_rx_buff_blk(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_RXBLK_BUFF;
+sc_ep_set(cs, CS_EP_RXBLK_BUFF);
}

/* Tell a conn-stream some room was made in the input buffer */
static inline void cs_rx_room_rdy(struct conn_stream *cs)
{
-cs->endp->flags &= ~CS_EP_RXBLK_ROOM;
+sc_ep_clr(cs, CS_EP_RXBLK_ROOM);
}

/* The conn-stream announces it failed to put data into the input buffer
@@ -384,7 +384,7 @@ static inline void cs_rx_room_rdy(struct conn_stream *cs)
*/
static inline void cs_rx_room_blk(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_RXBLK_ROOM;
+sc_ep_set(cs, CS_EP_RXBLK_ROOM);
}

/* The conn-stream announces it will never put new data into the input
@@ -393,43 +393,43 @@ static inline void cs_rx_room_blk(struct conn_stream *cs)
*/
static inline void cs_rx_shut_blk(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_RXBLK_SHUT;
+sc_ep_set(cs, CS_EP_RXBLK_SHUT);
}

/* Returns non-zero if the conn-stream's Tx path is blocked */
static inline int cs_tx_blocked(const struct conn_stream *cs)
{
-return !!(cs->endp->flags & CS_EP_WAIT_DATA);
+return !!sc_ep_test(cs, CS_EP_WAIT_DATA);
}

/* Returns non-zero if the conn-stream's endpoint is ready to transmit */
static inline int cs_tx_endp_ready(const struct conn_stream *cs)
{
-return (cs->endp->flags & CS_EP_WANT_GET);
+return sc_ep_test(cs, CS_EP_WANT_GET);
}

/* Report that a conn-stream wants to get some data from the output buffer */
static inline void cs_want_get(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_WANT_GET;
+sc_ep_set(cs, CS_EP_WANT_GET);
}

/* Report that a conn-stream failed to get some data from the output buffer */
static inline void cs_cant_get(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_WANT_GET | CS_EP_WAIT_DATA;
+sc_ep_set(cs, CS_EP_WANT_GET | CS_EP_WAIT_DATA);
}

/* Report that a conn-stream doesn't want to get data from the output buffer */
static inline void cs_stop_get(struct conn_stream *cs)
{
-cs->endp->flags &= ~CS_EP_WANT_GET;
+sc_ep_clr(cs, CS_EP_WANT_GET);
}

/* Report that a conn-stream won't get any more data from the output buffer */
static inline void cs_done_get(struct conn_stream *cs)
{
-cs->endp->flags &= ~(CS_EP_WANT_GET | CS_EP_WAIT_DATA);
+sc_ep_clr(cs, CS_EP_WANT_GET | CS_EP_WAIT_DATA);
}

#endif /* _HAPROXY_CONN_STREAM_H */
@@ -268,7 +268,7 @@ static inline int cs_get_dst(struct conn_stream *cs)
/* Marks on the conn-stream that next shutw must kill the whole connection */
static inline void cs_must_kill_conn(struct conn_stream *cs)
{
-cs->endp->flags |= CS_EP_KILL_CONN;
+sc_ep_set(cs, CS_EP_KILL_CONN);
}

@@ -292,7 +292,7 @@ static inline void cs_shutw(struct conn_stream *cs)
*/
static inline void cs_chk_rcv(struct conn_stream *cs)
{
-if (cs->endp->flags & CS_EP_RXBLK_CONN && cs_state_in(cs_opposite(cs)->state, CS_SB_RDY|CS_SB_EST|CS_SB_DIS|CS_SB_CLO))
+if (sc_ep_test(cs, CS_EP_RXBLK_CONN) && cs_state_in(cs_opposite(cs)->state, CS_SB_RDY|CS_SB_EST|CS_SB_DIS|CS_SB_CLO))
cs_rx_conn_rdy(cs);

if (cs_rx_blocked(cs) || !cs_rx_endp_ready(cs))
@@ -301,7 +301,7 @@ static inline void cs_chk_rcv(struct conn_stream *cs)
if (!cs_state_in(cs->state, CS_SB_RDY|CS_SB_EST))
return;

-cs->endp->flags |= CS_EP_RX_WAIT_EP;
+sc_ep_set(cs, CS_EP_RX_WAIT_EP);
cs->ops->chk_rcv(cs);
}
@@ -1578,7 +1578,7 @@ static int connect_server(struct stream *s)
srv_conn = NULL;
if (cs_reset_endp(s->csb) < 0)
return SF_ERR_INTERNAL;
-s->csb->endp->flags &= CS_EP_DETACHED;
+sc_ep_clr(s->csb, ~CS_EP_DETACHED);
}
}
else
@@ -1826,7 +1826,7 @@ skip_reuse:
* loopback on a heavily loaded system.
*/
if (srv_conn->flags & CO_FL_ERROR)
-s->csb->endp->flags |= CS_EP_ERROR;
+sc_ep_set(s->csb, CS_EP_ERROR);

/* If we had early data, and the handshake ended, then
* we can remove the flag, and attempt to wake the task up,
@@ -1834,7 +1834,7 @@ skip_reuse:
* the handshake.
*/
if (!(srv_conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)))
-s->csb->endp->flags &= ~CS_EP_WAIT_FOR_HS;
+sc_ep_clr(s->csb, CS_EP_WAIT_FOR_HS);

if (!cs_state_in(s->csb->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO) &&
(srv_conn->flags & CO_FL_WAIT_XPRT) == 0) {
@@ -1851,7 +1851,7 @@ skip_reuse:
* wake callback. Otherwise si_cs_recv()/si_cs_send() already take
* care of it.
*/
-if ((s->csb->endp->flags & CS_EP_EOI) && !(cs_ic(s->csb)->flags & CF_EOI))
+if (sc_ep_test(s->csb, CS_EP_EOI) && !(cs_ic(s->csb)->flags & CF_EOI))
cs_ic(s->csb)->flags |= (CF_EOI|CF_READ_PARTIAL);

/* catch all sync connect while the mux is not already installed */
@@ -2045,7 +2045,7 @@ void back_try_conn_req(struct stream *s)
* allocation problem, so we want to retry now.
*/
cs->state = CS_ST_CER;
-cs->endp->flags &= ~CS_EP_ERROR;
+sc_ep_clr(cs, CS_EP_ERROR);
back_handle_st_cer(s);

DBG_TRACE_STATE("connection error, retry", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
@@ -2262,9 +2262,9 @@ void back_handle_st_con(struct stream *s)

done:
/* retryable error ? */
-if ((s->flags & SF_CONN_EXP) || (cs->endp->flags & CS_EP_ERROR)) {
+if ((s->flags & SF_CONN_EXP) || sc_ep_test(cs, CS_EP_ERROR)) {
if (!s->conn_err_type) {
-if (cs->endp->flags & CS_EP_ERROR)
+if (sc_ep_test(cs, CS_EP_ERROR))
s->conn_err_type = STRM_ET_CONN_ERR;
else
s->conn_err_type = STRM_ET_CONN_TO;
@@ -2290,7 +2290,7 @@ void back_handle_st_con(struct stream *s)
void back_handle_st_cer(struct stream *s)
{
struct conn_stream *cs = s->csb;
-int must_tar = (cs->endp->flags & CS_EP_ERROR);
+int must_tar = sc_ep_test(cs, CS_EP_ERROR);

DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);

@@ -2310,7 +2310,7 @@ void back_handle_st_cer(struct stream *s)
_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
}

-if ((cs->endp->flags & CS_EP_ERROR) &&
+if (sc_ep_test(cs, CS_EP_ERROR) &&
conn && conn->err_code == CO_ER_SSL_MISMATCH_SNI) {
/* We tried to connect to a server which is configured
* with "verify required" and which doesn't have the
@@ -2489,7 +2489,7 @@ void back_handle_st_rdy(struct stream *s)
}

/* retryable error ? */
-if (cs->endp->flags & CS_EP_ERROR) {
+if (sc_ep_test(cs, CS_EP_ERROR)) {
if (!s->conn_err_type)
s->conn_err_type = STRM_ET_CONN_ERR;
cs->state = CS_ST_CER;
src/check.c (14 changed lines)
@@ -793,7 +793,7 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
retrieve_errno_from_socket(conn);

if (conn && !(conn->flags & CO_FL_ERROR) &&
-cs && !(cs->endp->flags & CS_EP_ERROR) && !expired)
+cs && !sc_ep_test(cs, CS_EP_ERROR) && !expired)
return;

TRACE_ENTER(CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check, 0, 0, (size_t[]){expired});
@@ -912,7 +912,7 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
}
else if (conn->flags & CO_FL_WAIT_L4_CONN) {
/* L4 not established (yet) */
-if (conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR)
+if (conn->flags & CO_FL_ERROR || sc_ep_test(cs, CS_EP_ERROR))
set_server_check_status(check, HCHK_STATUS_L4CON, err_msg);
else if (expired)
set_server_check_status(check, HCHK_STATUS_L4TOUT, err_msg);
@@ -927,12 +927,12 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
}
else if (conn->flags & CO_FL_WAIT_L6_CONN) {
/* L6 not established (yet) */
-if (conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR)
+if (conn->flags & CO_FL_ERROR || sc_ep_test(cs, CS_EP_ERROR))
set_server_check_status(check, HCHK_STATUS_L6RSP, err_msg);
else if (expired)
set_server_check_status(check, HCHK_STATUS_L6TOUT, err_msg);
}
-else if (conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR) {
+else if (conn->flags & CO_FL_ERROR || sc_ep_test(cs, CS_EP_ERROR)) {
/* I/O error after connection was established and before we could diagnose */
set_server_check_status(check, HCHK_STATUS_SOCKERR, err_msg);
}
@@ -1038,7 +1038,7 @@ static int wake_srv_chk(struct conn_stream *cs)
cs = check->cs;
conn = cs_conn(cs);

-if (unlikely(!conn || !cs || conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR)) {
+if (unlikely(!conn || !cs || conn->flags & CO_FL_ERROR || sc_ep_test(cs, CS_EP_ERROR))) {
/* We may get error reports bypassing the I/O handlers, typically
* the case when sending a pure TCP check which fails, then the I/O
* handlers above are not called. This is completely handled by the
@@ -1146,7 +1146,7 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
/* Here the connection must be defined. Otherwise the
* error would have already been detected
*/
-if ((conn && ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR))) || expired) {
+if ((conn && ((conn->flags & CO_FL_ERROR) || sc_ep_test(cs, CS_EP_ERROR))) || expired) {
TRACE_ERROR("report connection error", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
chk_report_conn_err(check, 0, expired);
}
@@ -1159,7 +1159,7 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
/* error will be handled by tcpcheck_main().
* On success, remove all flags except CS_EP_DETACHED
*/
-check->cs->endp->flags &= CS_EP_DETACHED;
+sc_ep_clr(check->cs, ~CS_EP_DETACHED);
}
tcpcheck_main(check);
}
@@ -183,7 +183,7 @@ struct conn_stream *cs_new_from_strm(struct stream *strm, unsigned int flags)
if (unlikely(!cs))
return NULL;
cs->flags |= flags;
-cs->endp->flags |= CS_EP_DETACHED;
+sc_ep_set(cs, CS_EP_DETACHED);
cs->app = &strm->obj_type;
cs->ops = &cs_app_embedded_ops;
cs->data_cb = NULL;
@@ -202,7 +202,7 @@ struct conn_stream *cs_new_from_check(struct check *check, unsigned int flags)
if (unlikely(!cs))
return NULL;
cs->flags |= flags;
-cs->endp->flags |= CS_EP_DETACHED;
+sc_ep_set(cs, CS_EP_DETACHED);
cs->app = &check->obj_type;
cs->data_cb = &check_conn_cb;
return cs;
@@ -216,7 +216,7 @@ void cs_free(struct conn_stream *cs)
sockaddr_free(&cs->src);
sockaddr_free(&cs->dst);
if (cs->endp) {
-BUG_ON(!(cs->endp->flags & CS_EP_DETACHED));
+BUG_ON(!sc_ep_test(cs, CS_EP_DETACHED));
cs_endpoint_free(cs->endp);
}
if (cs->wait_event.tasklet)
@@ -232,7 +232,7 @@ static void cs_free_cond(struct conn_stream **csp)
{
struct conn_stream *cs = *csp;

-if (!cs->app && (!cs->endp || (cs->endp->flags & CS_EP_DETACHED))) {
+if (!cs->app && (!cs->endp || sc_ep_test(cs, CS_EP_DETACHED))) {
cs_free(cs);
*csp = NULL;
}
@@ -249,8 +249,8 @@ int cs_attach_mux(struct conn_stream *cs, void *target, void *ctx)

cs->endp->target = target;
cs->endp->conn = ctx;
-cs->endp->flags |= CS_EP_T_MUX;
-cs->endp->flags &= ~CS_EP_DETACHED;
+sc_ep_set(cs, CS_EP_T_MUX);
+sc_ep_clr(cs, CS_EP_DETACHED);
if (!conn->ctx)
conn->ctx = cs;
if (cs_strm(cs)) {
@@ -289,8 +289,8 @@ int cs_attach_mux(struct conn_stream *cs, void *target, void *ctx)
static void cs_attach_applet(struct conn_stream *cs, void *target)
{
cs->endp->target = target;
-cs->endp->flags |= CS_EP_T_APPLET;
-cs->endp->flags &= ~CS_EP_DETACHED;
+sc_ep_set(cs, CS_EP_T_APPLET);
+sc_ep_clr(cs, CS_EP_DETACHED);
if (cs_strm(cs)) {
cs->ops = &cs_app_applet_ops;
cs->data_cb = &cs_data_applet_cb;
@@ -305,8 +305,8 @@ static void cs_attach_applet(struct conn_stream *cs, void *target)
int cs_attach_strm(struct conn_stream *cs, struct stream *strm)
{
cs->app = &strm->obj_type;
-cs->endp->flags &= ~CS_EP_ORPHAN;
-if (cs->endp->flags & CS_EP_T_MUX) {
+sc_ep_clr(cs, CS_EP_ORPHAN);
+if (sc_ep_test(cs, CS_EP_T_MUX)) {
cs->wait_event.tasklet = tasklet_new();
if (!cs->wait_event.tasklet)
return -1;
@@ -317,7 +317,7 @@ int cs_attach_strm(struct conn_stream *cs, struct stream *strm)
cs->ops = &cs_app_conn_ops;
cs->data_cb = &cs_data_conn_cb;
}
-else if (cs->endp->flags & CS_EP_T_APPLET) {
+else if (sc_ep_test(cs, CS_EP_T_APPLET)) {
cs->ops = &cs_app_applet_ops;
cs->data_cb = &cs_data_applet_cb;
}
@@ -345,7 +345,7 @@ static void cs_detach_endp(struct conn_stream **csp)
if (!cs->endp)
goto reset_cs;

-if (cs->endp->flags & CS_EP_T_MUX) {
+if (sc_ep_test(cs, CS_EP_T_MUX)) {
struct connection *conn = __cs_conn(cs);
struct cs_endpoint *endp = cs->endp;

@@ -368,10 +368,10 @@ static void cs_detach_endp(struct conn_stream **csp)
conn_free(conn);
}
}
-else if (cs->endp->flags & CS_EP_T_APPLET) {
+else if (sc_ep_test(cs, CS_EP_T_APPLET)) {
struct appctx *appctx = __cs_appctx(cs);

-cs->endp->flags |= CS_EP_ORPHAN;
+sc_ep_set(cs, CS_EP_ORPHAN);
cs->endp->cs = NULL;
cs->endp = NULL;
appctx_shut(appctx);
@@ -382,8 +382,8 @@ static void cs_detach_endp(struct conn_stream **csp)
/* the cs is the only one one the endpoint */
cs->endp->target = NULL;
cs->endp->conn = NULL;
-cs->endp->flags &= CS_EP_APP_MASK;
-cs->endp->flags |= CS_EP_DETACHED;
+sc_ep_clr(cs, ~CS_EP_APP_MASK);
+sc_ep_set(cs, CS_EP_DETACHED);
}

reset_cs:
@@ -442,7 +442,7 @@ int cs_reset_endp(struct conn_stream *cs)

BUG_ON(!cs->app);

-cs->endp->flags &= ~CS_EP_ERROR;
+sc_ep_clr(cs, CS_EP_ERROR);
if (!__cs_endp_target(cs)) {
/* endpoint not attached or attached to a mux with no
* target. Thus the endpoint will not be release but just
@@ -457,17 +457,17 @@ int cs_reset_endp(struct conn_stream *cs)
* fails */
new_endp = cs_endpoint_new();
if (!unlikely(new_endp)) {
-cs->endp->flags |= CS_EP_ERROR;
+sc_ep_set(cs, CS_EP_ERROR);
return -1;
}
-new_endp->flags = (cs->endp->flags & CS_EP_APP_MASK);
+new_endp->flags = sc_ep_get(cs) & CS_EP_APP_MASK;

/* The app is still attached, the cs will not be released */
cs_detach_endp(&cs);
BUG_ON(cs->endp);
cs->endp = new_endp;
cs->endp->cs = cs;
-cs->endp->flags |= CS_EP_DETACHED;
+sc_ep_set(cs, CS_EP_DETACHED);
return 0;
}
@@ -563,7 +563,7 @@ static void cs_app_shutw(struct conn_stream *cs)
* However, if CS_FL_NOLINGER is explicitly set, we know there is
* no risk so we close both sides immediately.
*/
-if (!(cs->endp->flags & CS_EP_ERROR) && !(cs->flags & CS_FL_NOLINGER) &&
+if (!sc_ep_test(cs, CS_EP_ERROR) && !(cs->flags & CS_FL_NOLINGER) &&
!(ic->flags & (CF_SHUTR|CF_DONT_READ)))
return;

@@ -620,14 +620,14 @@ static void cs_app_chk_snd(struct conn_stream *cs)
if (unlikely(cs->state != CS_ST_EST || (oc->flags & CF_SHUTW)))
return;

-if (!(cs->endp->flags & CS_EP_WAIT_DATA) || /* not waiting for data */
+if (!sc_ep_test(cs, CS_EP_WAIT_DATA) || /* not waiting for data */
channel_is_empty(oc)) /* called with nothing to send ! */
return;

/* Otherwise there are remaining data to be sent in the buffer,
* so we tell the handler.
*/
-cs->endp->flags &= ~CS_EP_WAIT_DATA;
+sc_ep_clr(cs, CS_EP_WAIT_DATA);
if (!tick_isset(oc->wex))
oc->wex = tick_add_ifset(now_ms, oc->wto);

@@ -708,7 +708,7 @@ static void cs_app_shutw_conn(struct conn_stream *cs)
* no risk so we close both sides immediately.
*/

-if (cs->endp->flags & CS_EP_ERROR) {
+if (sc_ep_test(cs, CS_EP_ERROR)) {
/* quick close, the socket is already shut anyway */
}
else if (cs->flags & CS_FL_NOLINGER) {
@@ -788,16 +788,16 @@ static void cs_app_chk_snd_conn(struct conn_stream *cs)
return;

if (!oc->pipe && /* spliced data wants to be forwarded ASAP */
-!(cs->endp->flags & CS_EP_WAIT_DATA)) /* not waiting for data */
+!sc_ep_test(cs, CS_EP_WAIT_DATA)) /* not waiting for data */
return;

if (!(cs->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(cs_oc(cs)))
cs_conn_send(cs);

-if (cs->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING) || cs_is_conn_error(cs)) {
+if (sc_ep_test(cs, CS_EP_ERROR | CS_EP_ERR_PENDING) || cs_is_conn_error(cs)) {
/* Write error on the file descriptor */
if (cs->state >= CS_ST_CON)
-cs->endp->flags |= CS_EP_ERROR;
+sc_ep_set(cs, CS_EP_ERROR);
goto out_wakeup;
}

@@ -818,14 +818,14 @@ static void cs_app_chk_snd_conn(struct conn_stream *cs)
}

if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == 0)
-cs->endp->flags |= CS_EP_WAIT_DATA;
+sc_ep_set(cs, CS_EP_WAIT_DATA);
oc->wex = TICK_ETERNITY;
}
else {
/* Otherwise there are remaining data to be sent in the buffer,
* which means we have to poll before doing so.
*/
-cs->endp->flags &= ~CS_EP_WAIT_DATA;
+sc_ep_clr(cs, CS_EP_WAIT_DATA);
if (!tick_isset(oc->wex))
oc->wex = tick_add_ifset(now_ms, oc->wto);
}
@@ -938,7 +938,7 @@ static void cs_app_shutw_applet(struct conn_stream *cs)
* However, if CS_FL_NOLINGER is explicitly set, we know there is
* no risk so we close both sides immediately.
*/
-if (!(cs->endp->flags & CS_EP_ERROR) && !(cs->flags & CS_FL_NOLINGER) &&
+if (!sc_ep_test(cs, CS_EP_ERROR) && !(cs->flags & CS_FL_NOLINGER) &&
!(ic->flags & (CF_SHUTR|CF_DONT_READ)))
return;

@@ -993,7 +993,7 @@ static void cs_app_chk_snd_applet(struct conn_stream *cs)

/* we only wake the applet up if it was waiting for some data */

-if (!(cs->endp->flags & CS_EP_WAIT_DATA))
+if (!sc_ep_test(cs, CS_EP_WAIT_DATA))
return;

if (!tick_isset(oc->wex))
@@ -1042,7 +1042,7 @@ void cs_update_rx(struct conn_stream *cs)
*/
cs_rx_room_rdy(cs);
}
-if (cs->endp->flags & CS_EP_RXBLK_ANY)
+if (sc_ep_test(cs, CS_EP_RXBLK_ANY))
ic->rex = TICK_ETERNITY;
else if (!(ic->flags & CF_READ_NOEXP) && !tick_isset(ic->rex))
ic->rex = tick_add_ifset(now_ms, ic->rto);
@@ -1070,9 +1070,9 @@ void cs_update_tx(struct conn_stream *cs)
/* Write not closed, update FD status and timeout for writes */
if (channel_is_empty(oc)) {
/* stop writing */
-if (!(cs->endp->flags & CS_EP_WAIT_DATA)) {
+if (!sc_ep_test(cs, CS_EP_WAIT_DATA)) {
if ((oc->flags & CF_SHUTW_NOW) == 0)
-cs->endp->flags |= CS_EP_WAIT_DATA;
+sc_ep_set(cs, CS_EP_WAIT_DATA);
oc->wex = TICK_ETERNITY;
}
return;
@@ -1083,7 +1083,7 @@ void cs_update_tx(struct conn_stream *cs)
* update it if is was not yet set. The stream socket handler will already
* have updated it if there has been a completed I/O.
*/
-cs->endp->flags &= ~CS_EP_WAIT_DATA;
+sc_ep_clr(cs, CS_EP_WAIT_DATA);
if (!tick_isset(oc->wex)) {
oc->wex = tick_add_ifset(now_ms, oc->wto);
if (tick_isset(ic->rex) && !(cs->flags & CS_FL_INDEP_STR)) {
@@ -1129,9 +1129,9 @@ static void cs_notify(struct conn_stream *cs)
* we're about to close and can't expect more data if SHUTW_NOW is there.
*/
if (!(oc->flags & (CF_SHUTW|CF_SHUTW_NOW)))
-cs->endp->flags |= CS_EP_WAIT_DATA;
+sc_ep_set(cs, CS_EP_WAIT_DATA);
else if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW)
-cs->endp->flags &= ~CS_EP_WAIT_DATA;
+sc_ep_clr(cs, CS_EP_WAIT_DATA);

/* update OC timeouts and wake the other side up if it's waiting for room */
if (oc->flags & CF_WRITE_ACTIVITY) {
@@ -1162,7 +1162,7 @@ static void cs_notify(struct conn_stream *cs)
* an HTTP parser might need more data to complete the parsing.
*/
if (!channel_is_empty(ic) &&
-(cso->endp->flags & CS_EP_WAIT_DATA) &&
+sc_ep_test(cso, CS_EP_WAIT_DATA) &&
(!(ic->flags & CF_EXPECT_MORE) || c_full(ic) || ci_data(ic) == 0 || ic->pipe)) {
int new_len, last_len;

@@ -1202,7 +1202,7 @@ static void cs_notify(struct conn_stream *cs)
if (/* changes on the production side */
(ic->flags & (CF_READ_NULL|CF_READ_ERROR)) ||
!cs_state_in(cs->state, CS_SB_CON|CS_SB_RDY|CS_SB_EST) ||
-(cs->endp->flags & CS_EP_ERROR) ||
+sc_ep_test(cs, CS_EP_ERROR) ||
((ic->flags & CF_READ_PARTIAL) &&
((ic->flags & CF_EOI) || !ic->to_forward || cso->state != CS_ST_EST)) ||
@@ -1313,7 +1313,7 @@ static int cs_conn_recv(struct conn_stream *cs)
return 0;

/* stop here if we reached the end of data */
-if (cs->endp->flags & CS_EP_EOS)
+if (sc_ep_test(cs, CS_EP_EOS))
goto end_recv;

/* stop immediately on errors. Note that we DON'T want to stop on
@@ -1322,15 +1322,15 @@ static int cs_conn_recv(struct conn_stream *cs)
* happens when we send too large a request to a backend server
* which rejects it before reading it all.
*/
-if (!(cs->endp->flags & CS_EP_RCV_MORE)) {
+if (!sc_ep_test(cs, CS_EP_RCV_MORE)) {
if (!conn_xprt_ready(conn))
return 0;
-if (cs->endp->flags & CS_EP_ERROR)
+if (sc_ep_test(cs, CS_EP_ERROR))
goto end_recv;
}

/* prepare to detect if the mux needs more room */
-cs->endp->flags &= ~CS_EP_WANT_ROOM;
+sc_ep_clr(cs, CS_EP_WANT_ROOM);

if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
global.tune.idle_timer &&
@@ -1347,7 +1347,7 @@ static int cs_conn_recv(struct conn_stream *cs)
/* First, let's see if we may splice data across the channel without
* using a buffer.
*/
-if (cs->endp->flags & CS_EP_MAY_SPLICE &&
+if (sc_ep_test(cs, CS_EP_MAY_SPLICE) &&
(ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
ic->flags & CF_KERN_SPLICING) {
if (c_data(ic)) {
@@ -1382,7 +1382,7 @@ static int cs_conn_recv(struct conn_stream *cs)
ic->flags |= CF_READ_PARTIAL;
}

-if (cs->endp->flags & (CS_EP_EOS|CS_EP_ERROR))
+if (sc_ep_test(cs, CS_EP_EOS | CS_EP_ERROR))
goto end_recv;

if (conn->flags & CO_FL_WAIT_ROOM) {
@@ -1402,7 +1402,7 @@ static int cs_conn_recv(struct conn_stream *cs)
ic->pipe = NULL;
}

-if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && cs->endp->flags & CS_EP_MAY_SPLICE) {
+if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && sc_ep_test(cs, CS_EP_MAY_SPLICE)) {
/* don't break splicing by reading, but still call rcv_buf()
* to pass the flag.
*/
@@ -1437,9 +1437,9 @@ static int cs_conn_recv(struct conn_stream *cs)
* that if such an event is not handled above in splice, it will be handled here by
* recv().
*/
-while ((cs->endp->flags & CS_EP_RCV_MORE) ||
+while (sc_ep_test(cs, CS_EP_RCV_MORE) ||
(!(conn->flags & CO_FL_HANDSHAKE) &&
-(!(cs->endp->flags & (CS_EP_ERROR|CS_EP_EOS))) && !(ic->flags & CF_SHUTR))) {
+(!sc_ep_test(cs, CS_EP_ERROR | CS_EP_EOS)) && !(ic->flags & CF_SHUTR))) {
int cur_flags = flags;

/* Compute transient CO_RFL_* flags */
@@ -1453,7 +1453,7 @@ static int cs_conn_recv(struct conn_stream *cs)
max = channel_recv_max(ic);
ret = conn->mux->rcv_buf(cs, &ic->buf, max, cur_flags);

-if (cs->endp->flags & CS_EP_WANT_ROOM) {
+if (sc_ep_test(cs, CS_EP_WANT_ROOM)) {
/* CS_EP_WANT_ROOM must not be reported if the channel's
* buffer is empty.
*/
@@ -1498,7 +1498,7 @@ static int cs_conn_recv(struct conn_stream *cs)
* the channel's policies.This way, we are still able to receive
* shutdowns.
*/
-if (cs->endp->flags & CS_EP_EOI)
+if (sc_ep_test(cs, CS_EP_EOI))
break;

if ((ic->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
@@ -1584,14 +1584,14 @@ static int cs_conn_recv(struct conn_stream *cs)

/* Report EOI on the channel if it was reached from the mux point of
* view. */
-if ((cs->endp->flags & CS_EP_EOI) && !(ic->flags & CF_EOI)) {
+if (sc_ep_test(cs, CS_EP_EOI) && !(ic->flags & CF_EOI)) {
ic->flags |= (CF_EOI|CF_READ_PARTIAL);
ret = 1;
}

-if (cs->endp->flags & CS_EP_ERROR)
+if (sc_ep_test(cs, CS_EP_ERROR))
ret = 1;
-else if (cs->endp->flags & CS_EP_EOS) {
+else if (sc_ep_test(cs, CS_EP_EOS)) {
/* we received a shutdown */
ic->flags |= CF_READ_NULL;
if (ic->flags & CF_AUTO_CLOSE)
@@ -1648,7 +1648,7 @@ static int cs_conn_send(struct conn_stream *cs)
int ret;
int did_send = 0;

-if (cs->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING) || cs_is_conn_error(cs)) {
+if (sc_ep_test(cs, CS_EP_ERROR | CS_EP_ERR_PENDING) || cs_is_conn_error(cs)) {
/* We're probably there because the tasklet was woken up,
* but process_stream() ran before, detected there were an
* error and put the CS back to CS_ST_TAR. There's still
@@ -1657,7 +1657,7 @@ static int cs_conn_send(struct conn_stream *cs)
*/
if (cs->state < CS_ST_CON)
return 0;
-cs->endp->flags |= CS_EP_ERROR;
+sc_ep_set(cs, CS_EP_ERROR);
return 1;
}

@@ -1770,8 +1770,8 @@ static int cs_conn_send(struct conn_stream *cs)
cs_rx_room_rdy(cs_opposite(cs));
}

-if (cs->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING)) {
-cs->endp->flags |= CS_EP_ERROR;
+if (sc_ep_test(cs, CS_EP_ERROR | CS_EP_ERR_PENDING)) {
+sc_ep_set(cs, CS_EP_ERROR);
return 1;
}

@@ -1839,7 +1839,7 @@ static int cs_conn_process(struct conn_stream *cs)

if (cs->state >= CS_ST_CON) {
if (cs_is_conn_error(cs))
-cs->endp->flags |= CS_EP_ERROR;
+sc_ep_set(cs, CS_EP_ERROR);
}

/* If we had early data, and the handshake ended, then
@@ -1848,8 +1848,8 @@ static int cs_conn_process(struct conn_stream *cs)
* the handshake.
*/
if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
-(cs->endp->flags & CS_EP_WAIT_FOR_HS)) {
-cs->endp->flags &= ~CS_EP_WAIT_FOR_HS;
+sc_ep_test(cs, CS_EP_WAIT_FOR_HS)) {
+sc_ep_clr(cs, CS_EP_WAIT_FOR_HS);
task_wakeup(cs_strm_task(cs), TASK_WOKEN_MSG);
}

@@ -1868,7 +1868,7 @@ static int cs_conn_process(struct conn_stream *cs)
* wake callback. Otherwise cs_conn_recv()/cs_conn_send() already take
* care of it.
*/
-if (cs->endp->flags & CS_EP_EOS && !(ic->flags & CF_SHUTR)) {
+if (sc_ep_test(cs, CS_EP_EOS) && !(ic->flags & CF_SHUTR)) {
/* we received a shutdown */
ic->flags |= CF_READ_NULL;
if (ic->flags & CF_AUTO_CLOSE)
@@ -1883,7 +1883,7 @@ static int cs_conn_process(struct conn_stream *cs)
* wake callback. Otherwise cs_conn_recv()/cs_conn_send() already take
* care of it.
*/
-if ((cs->endp->flags & CS_EP_EOI) && !(ic->flags & CF_EOI))
+if (sc_ep_test(cs, CS_EP_EOI) && !(ic->flags & CF_EOI))
ic->flags |= (CF_EOI|CF_READ_PARTIAL);

/* Second step : update the conn-stream and channels, try to forward any
@@ -1933,8 +1933,8 @@ static int cs_applet_process(struct conn_stream *cs)
/* If the applet wants to write and the channel is closed, it's a
* broken pipe and it must be reported.
*/
-if (!(cs->endp->flags & CS_EP_RX_WAIT_EP) && (ic->flags & CF_SHUTR))
-cs->endp->flags |= CS_EP_ERROR;
+if (!sc_ep_test(cs, CS_EP_RX_WAIT_EP) && (ic->flags & CF_SHUTR))
+sc_ep_set(cs, CS_EP_ERROR);

/* automatically mark the applet having data available if it reported
* begin blocked by the channel.
@@ -1268,7 +1268,7 @@ static __inline int do_l7_retry(struct stream *s, struct conn_stream *cs)
s->flags |= SF_ERR_INTERNAL;
return -1;
}
-cs->endp->flags &= ~CS_EP_RXBLK_SHUT;
+sc_ep_clr(cs, CS_EP_RXBLK_SHUT);

b_free(&req->buf);
/* Swap the L7 buffer with the channel buffer */
@@ -5172,7 +5172,7 @@ struct http_txn *http_create_txn(struct stream *s)
return NULL;
s->txn = txn;

-txn->flags = ((cs && cs->endp->flags & CS_EP_NOT_FIRST) ? TX_NOT_FIRST : 0);
+txn->flags = ((cs && sc_ep_test(cs, CS_EP_NOT_FIRST)) ? TX_NOT_FIRST : 0);
txn->status = -1;
txn->http_reply = NULL;
txn->l7_buffer = BUF_NULL;
@@ -7889,7 +7889,7 @@ enum act_return ssl_action_wait_for_hs(struct act_rule *rule, struct proxy *px,

if (conn && cs) {
if (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS)) {
-cs->endp->flags |= CS_EP_WAIT_FOR_HS;
+sc_ep_set(cs, CS_EP_WAIT_FOR_HS);
s->req.flags |= CF_READ_NULL;
return ACT_RET_YIELD;
}
src/stream.c (36 changed lines)
@@ -313,10 +313,10 @@ int stream_buf_available(void *arg)
{
struct stream *s = arg;

-if (!s->req.buf.size && !s->req.pipe && (s->csf->endp->flags & CS_EP_RXBLK_BUFF) &&
+if (!s->req.buf.size && !s->req.pipe && sc_ep_test(s->csf, CS_EP_RXBLK_BUFF) &&
b_alloc(&s->req.buf))
cs_rx_buff_rdy(s->csf);
-else if (!s->res.buf.size && !s->res.pipe && (s->csb->endp->flags & CS_EP_RXBLK_BUFF) &&
+else if (!s->res.buf.size && !s->res.pipe && sc_ep_test(s->csb, CS_EP_RXBLK_BUFF) &&
b_alloc(&s->res.buf))
cs_rx_buff_rdy(s->csb);
else
@@ -463,7 +463,7 @@ struct stream *stream_new(struct session *sess, struct conn_stream *cs, struct b
if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
s->csb->flags |= CS_FL_INDEP_STR;

-if (cs->endp->flags & CS_EP_WEBSOCKET)
+if (sc_ep_test(cs, CS_EP_WEBSOCKET))
s->flags |= SF_WEBSOCKET;
if (cs_conn(cs)) {
const struct mux_ops *mux = cs_conn_mux(cs);
@@ -886,7 +886,7 @@ static void back_establish(struct stream *s)
s->flags &= ~SF_CONN_EXP;

/* errors faced after sending data need to be reported */
-if (s->csb->endp->flags & CS_EP_ERROR && req->flags & CF_WROTE_DATA) {
+if (sc_ep_test(s->csb, CS_EP_ERROR) && req->flags & CF_WROTE_DATA) {
/* Don't add CF_WRITE_ERROR if we're here because
* early data were rejected by the server, or
* http_wait_for_response() will never be called
@@ -1716,7 +1716,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
(CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
!(s->flags & SF_CONN_EXP) &&
-!((csf->endp->flags | csb->flags) & CS_EP_ERROR) &&
+!((sc_ep_get(csf) | csb->flags) & CS_EP_ERROR) &&
((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
csf->flags &= ~CS_FL_DONT_WAKE;
csb->flags &= ~CS_FL_DONT_WAKE;
@@ -1735,10 +1735,10 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
* must be be reviewed too.
*/
if (!stream_alloc_work_buffer(s)) {
-s->csf->endp->flags |= CS_EP_ERROR;
+sc_ep_set(s->csf, CS_EP_ERROR);
s->conn_err_type = STRM_ET_CONN_RES;

-s->csb->endp->flags |= CS_EP_ERROR;
+sc_ep_set(s->csb, CS_EP_ERROR);
s->conn_err_type = STRM_ET_CONN_RES;

if (!(s->flags & SF_ERR_MASK))
@@ -1754,7 +1754,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
* connection setup code must be able to deal with any type of abort.
*/
srv = objt_server(s->target);
-if (unlikely(csf->endp->flags & CS_EP_ERROR)) {
+if (unlikely(sc_ep_test(csf, CS_EP_ERROR))) {
if (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS)) {
cs_shutr(csf);
cs_shutw(csf);
@@ -1774,7 +1774,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
}
}

-if (unlikely(csb->endp->flags & CS_EP_ERROR)) {
+if (unlikely(sc_ep_test(csb, CS_EP_ERROR))) {
if (cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS)) {
cs_shutr(csb);
cs_shutw(csb);
@@ -2328,8 +2328,8 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
/* Benchmarks have shown that it's optimal to do a full resync now */
if (csf->state == CS_ST_DIS ||
cs_state_in(csb->state, CS_SB_RDY|CS_SB_DIS) ||
-(csf->endp->flags & CS_EP_ERROR && csf->state != CS_ST_CLO) ||
-(csb->endp->flags & CS_EP_ERROR && csb->state != CS_ST_CLO))
+(sc_ep_test(csf, CS_EP_ERROR) && csf->state != CS_ST_CLO) ||
+(sc_ep_test(csb, CS_EP_ERROR) && csb->state != CS_ST_CLO))
goto resync_conn_stream;

/* otherwise we want to check if we need to resync the req buffer or not */
@@ -2452,8 +2452,8 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)

if (csf->state == CS_ST_DIS ||
cs_state_in(csb->state, CS_SB_RDY|CS_SB_DIS) ||
-(csf->endp->flags & CS_EP_ERROR && csf->state != CS_ST_CLO) ||
-(csb->endp->flags & CS_EP_ERROR && csb->state != CS_ST_CLO))
+(sc_ep_test(csf, CS_EP_ERROR) && csf->state != CS_ST_CLO) ||
+(sc_ep_test(csb, CS_EP_ERROR) && csb->state != CS_ST_CLO))
goto resync_conn_stream;

if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
@@ -3307,8 +3307,9 @@ static int stats_dump_full_strm_to_buffer(struct conn_stream *cs, struct stream
csf = strm->csf;
chunk_appendf(&trash, " csf=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
csf, csf->flags, cs_state_str(csf->state),
-(csf->endp->flags & CS_EP_T_MUX ? "CONN" : (csf->endp->flags & CS_EP_T_APPLET ? "APPCTX" : "NONE")),
-csf->endp->target, csf->endp->flags, csf->wait_event.events);
+(sc_ep_test(csf, CS_EP_T_MUX) ? "CONN" : (sc_ep_test(csf, CS_EP_T_APPLET) ? "APPCTX" : "NONE")),
+csf->endp->target, sc_ep_get(csf),
+csf->wait_event.events);

if ((conn = cs_conn(csf)) != NULL) {
chunk_appendf(&trash,
@@ -3346,8 +3347,9 @@ static int stats_dump_full_strm_to_buffer(struct conn_stream *cs, struct stream
csb = strm->csb;
chunk_appendf(&trash, " csb=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
csb, csb->flags, cs_state_str(csb->state),
-(csb->endp->flags & CS_EP_T_MUX ? "CONN" : (csb->endp->flags & CS_EP_T_APPLET ? "APPCTX" : "NONE")),
-csb->endp->target, csb->endp->flags, csb->wait_event.events);
+(sc_ep_test(csb, CS_EP_T_MUX) ? "CONN" : (sc_ep_test(csb, CS_EP_T_APPLET) ? "APPCTX" : "NONE")),
+csb->endp->target, sc_ep_get(csb),
+csb->wait_event.events);
if ((conn = cs_conn(csb)) != NULL) {
chunk_appendf(&trash,
" co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
@@ -1484,7 +1484,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_r
TRACE_DATA("send data", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA, check);
if (conn->mux->snd_buf(cs, &check->bo,
(IS_HTX_CONN(conn) ? (htxbuf(&check->bo))->data: b_data(&check->bo)), 0) <= 0) {
-if ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR)) {
+if ((conn->flags & CO_FL_ERROR) || sc_ep_test(cs, CS_EP_ERROR)) {
ret = TCPCHK_EVAL_STOP;
TRACE_DEVEL("connection error during send", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TX_ERR, check);
goto out;
@@ -1548,7 +1548,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
goto wait_more_data;
}

-if (cs->endp->flags & CS_EP_EOS)
+if (sc_ep_test(cs, CS_EP_EOS))
goto end_recv;

if (check->state & CHK_ST_IN_ALLOC) {
@@ -1565,15 +1565,15 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
/* errors on the connection and the conn-stream were already checked */

/* prepare to detect if the mux needs more room */
-cs->endp->flags &= ~CS_EP_WANT_ROOM;
+sc_ep_clr(cs, CS_EP_WANT_ROOM);

-while ((cs->endp->flags & CS_EP_RCV_MORE) ||
-(!(conn->flags & CO_FL_ERROR) && !(cs->endp->flags & (CS_EP_ERROR|CS_EP_EOS)))) {
+while (sc_ep_test(cs, CS_EP_RCV_MORE) ||
+(!(conn->flags & CO_FL_ERROR) && !sc_ep_test(cs, CS_EP_ERROR | CS_EP_EOS))) {
max = (IS_HTX_CS(cs) ? htx_free_space(htxbuf(&check->bi)) : b_room(&check->bi));
read = conn->mux->rcv_buf(cs, &check->bi, max, 0);
cur_read += read;
if (!read ||
-(cs->endp->flags & CS_EP_WANT_ROOM) ||
+sc_ep_test(cs, CS_EP_WANT_ROOM) ||
(--read_poll <= 0) ||
(read < max && read >= global.tune.recv_enough))
break;
@@ -1581,7 +1581,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r

end_recv:
is_empty = (IS_HTX_CS(cs) ? htx_is_empty(htxbuf(&check->bi)) : !b_data(&check->bi));
-if (is_empty && ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR))) {
+if (is_empty && ((conn->flags & CO_FL_ERROR) || sc_ep_test(cs, CS_EP_ERROR))) {
/* Report network errors only if we got no other data. Otherwise
* we'll let the upper layers decide whether the response is OK
* or not. It is very common that an RST sent by the server is
@@ -1591,15 +1591,17 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
goto stop;
}
if (!cur_read) {
-if (cs->endp->flags & CS_EP_EOI) {
+if (sc_ep_test(cs, CS_EP_EOI)) {
/* If EOI is set, it means there is a response or an error */
goto out;
}
-if (!(cs->endp->flags & (CS_EP_WANT_ROOM|CS_EP_ERROR|CS_EP_EOS))) {
+
+if (!sc_ep_test(cs, CS_EP_WANT_ROOM | CS_EP_ERROR | CS_EP_EOS)) {
conn->mux->subscribe(cs, SUB_RETRY_RECV, &cs->wait_event);
TRACE_DEVEL("waiting for response", CHK_EV_RX_DATA, check);
goto wait_more_data;
}

if (is_empty) {
int status;

@@ -2141,7 +2143,7 @@ int tcpcheck_main(struct check *check)
*/

/* 1- check for connection error, if any */
-if ((conn && conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR))
+if ((conn && conn->flags & CO_FL_ERROR) || sc_ep_test(cs, CS_EP_ERROR))
goto out_end_tcpcheck;

/* 2- check if a rule must be resume. It happens if check->current_step
@@ -2223,7 +2225,7 @@ int tcpcheck_main(struct check *check)
goto out_end_tcpcheck;
else if (eval_ret == TCPCHK_EVAL_WAIT)
goto out;
-last_read = ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & (CS_EP_ERROR|CS_EP_EOS)));
+last_read = ((conn->flags & CO_FL_ERROR) || sc_ep_test(cs, CS_EP_ERROR | CS_EP_EOS));
must_read = 0;
}

@@ -2304,7 +2306,7 @@ int tcpcheck_main(struct check *check)
TRACE_PROTO("tcp-check passed", CHK_EV_TCPCHK_EVAL, check);

out_end_tcpcheck:
-if ((conn && conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR)) {
+if ((conn && conn->flags & CO_FL_ERROR) || sc_ep_test(cs, CS_EP_ERROR)) {
TRACE_ERROR("report connection error", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_ERR, check);
chk_report_conn_err(check, errno, 0);
}