/*
 * Pass-through mux-demux for connections
 *
 * Copyright 2017 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/cfgparse.h>
#include <haproxy/connection.h>
#include <haproxy/pipe.h>
#include <haproxy/stconn.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/trace.h>
#include <haproxy/xref.h>
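
/* the mux_pt connection context: the single stream endpoint descriptor, the
 * underlying connection, and a wait_event used to subscribe to xprt I/O
 * events while no stream is attached (idle connection).
 */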
struct mux_pt_ctx {
	struct sedesc *sd;
	struct connection *conn;
	struct wait_event wait_event;
};

DECLARE_STATIC_POOL(pool_head_pt_ctx, "mux_pt", sizeof(struct mux_pt_ctx));

/* trace source and events */
static void pt_trace(enum trace_level level, uint64_t mask,
                     const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this:
 *   pt_ctx - internal PT context
 *   strm   - application layer
 */
static const struct trace_event pt_trace_events[] = {
#define PT_EV_CONN_NEW      (1ULL << 0)
	{ .mask = PT_EV_CONN_NEW,   .name = "pt_conn_new",  .desc = "new PT connection" },
#define PT_EV_CONN_WAKE     (1ULL << 1)
	{ .mask = PT_EV_CONN_WAKE,  .name = "pt_conn_wake", .desc = "PT connection woken up" },
#define PT_EV_CONN_END      (1ULL << 2)
	{ .mask = PT_EV_CONN_END,   .name = "pt_conn_end",  .desc = "PT connection terminated" },
#define PT_EV_CONN_ERR      (1ULL << 3)
	{ .mask = PT_EV_CONN_ERR,   .name = "pt_conn_err",  .desc = "error on PT connection" },
#define PT_EV_STRM_NEW      (1ULL << 4)
	{ .mask = PT_EV_STRM_NEW,   .name = "strm_new",     .desc = "app-layer stream creation" },
#define PT_EV_STRM_SHUT     (1ULL << 5)
	{ .mask = PT_EV_STRM_SHUT,  .name = "strm_shut",    .desc = "stream shutdown" },
#define PT_EV_STRM_END      (1ULL << 6)
	{ .mask = PT_EV_STRM_END,   .name = "strm_end",     .desc = "detaching app-layer stream" },
#define PT_EV_STRM_ERR      (1ULL << 7)
	{ .mask = PT_EV_STRM_ERR,   .name = "strm_err",     .desc = "stream error" },
#define PT_EV_RX_DATA       (1ULL << 8)
	{ .mask = PT_EV_RX_DATA,    .name = "pt_rx_data",   .desc = "Rx on PT connection" },
#define PT_EV_TX_DATA       (1ULL << 9)
	{ .mask = PT_EV_TX_DATA,    .name = "pt_tx_data",   .desc = "Tx on PT connection" },
	{ }
};

static const struct name_desc pt_trace_decoding[] = {
#define PT_VERB_CLEAN    1
	{ .name = "clean",    .desc = "only user-friendly stuff, generally suitable for level \"user\"" },
#define PT_VERB_MINIMAL  2
	{ .name = "minimal",  .desc = "report only h1c/h1s state and flags, no real decoding" },
#define PT_VERB_SIMPLE   3
	{ .name = "simple",   .desc = "add request/response status line or htx info when available" },
#define PT_VERB_ADVANCED 4
	{ .name = "advanced", .desc = "add header fields or frame decoding when available" },
#define PT_VERB_COMPLETE 5
	{ .name = "complete", .desc = "add full data dump when available" },
	{ /* end */ }
};

static struct trace_source trace_pt __read_mostly = {
	.name = IST("pt"),
	.desc = "Passthrough multiplexer",
	.arg_def = TRC_ARG1_CONN,  // TRACE()'s first argument is always a connection
	.default_cb = pt_trace,
	.known_events = pt_trace_events,
	.lockon_args = NULL,
	.decoding = pt_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_pt
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

/* returns the stconn associated to the stream */
static forceinline struct stconn *pt_sc(const struct mux_pt_ctx *pt)
{
	return pt->sd->sc;
}
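
/* dumps <len> bytes of buffer <buf> starting at offset <ofs> into the trace
 * buffer, handling buffer wrapping by dumping the two contiguous blocks
 * separately.
 */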
static inline void pt_trace_buf(const struct buffer *buf, size_t ofs, size_t len)
{
	size_t block1, block2;
	int line, ptr, newptr;

	block1 = b_contig_data(buf, ofs);
	block2 = 0;
	if (block1 > len)
		block1 = len;
	block2 = len - block1;
	ofs = b_peek_ofs(buf, ofs);

	line = 0;
	ptr = ofs;
	while (ptr < ofs + block1) {
		newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), ofs + block1, &line, ptr);
		if (newptr == ptr)
			break;
		ptr = newptr;
	}

	line = ptr = 0;
	while (ptr < block2) {
		newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), block2, &line, ptr);
		if (newptr == ptr)
			break;
		ptr = newptr;
	}
}

/* the PT traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive the pt context), that arg2, if non-null, is a
 * stream connector, and that arg3, if non-null, is a buffer.
 */
static void pt_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct connection *conn = a1;
	const struct mux_pt_ctx *ctx = conn ? conn->ctx : NULL;
	const struct stconn *sc = a2;
	const struct buffer *buf = a3;
	const size_t *val = a4;

	if (!ctx || src->verbosity < PT_VERB_CLEAN)
		return;

	/* Display frontend/backend info by default */
	chunk_appendf(&trace_buf, " : [%c]", (conn_is_back(conn) ? 'B' : 'F'));

	if (src->verbosity == PT_VERB_CLEAN)
		return;

	if (!sc)
		sc = pt_sc(ctx);

	/* Display the value to the 4th argument (level > STATE) */
	if (src->level > TRACE_LEVEL_STATE && val)
		chunk_appendf(&trace_buf, " - VAL=%lu", (long)*val);

	/* Display conn and sc info, if defined (pointer + flags) */
	chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn->flags);
	chunk_appendf(&trace_buf, " sd=%p(0x%08x)", ctx->sd, se_fl_get(ctx->sd));
	if (sc)
		chunk_appendf(&trace_buf, " sc=%p(0x%08x)", sc, sc->flags);

	if (src->verbosity == PT_VERB_MINIMAL)
		return;

	/* Display buffer info, if defined (level > USER & verbosity > SIMPLE) */
	if (src->level > TRACE_LEVEL_USER && buf) {
		int full = 0, max = 3000, chunk = 1024;

		/* Full info (level > STATE && verbosity > SIMPLE) */
		if (src->level > TRACE_LEVEL_STATE) {
			if (src->verbosity == PT_VERB_COMPLETE)
				full = 1;
			else if (src->verbosity == PT_VERB_ADVANCED) {
				full = 1;
				max = 256;
				chunk = 64;
			}
		}

		chunk_appendf(&trace_buf, " buf=%u@%p+%u/%u",
		              (unsigned int)b_data(buf), b_orig(buf),
		              (unsigned int)b_head_ofs(buf), (unsigned int)b_size(buf));

		if (b_data(buf) && full) {
			chunk_memcat(&trace_buf, "\n", 1);
			if (b_data(buf) < max)
				pt_trace_buf(buf, 0, b_data(buf));
			else {
				pt_trace_buf(buf, 0, chunk);
				chunk_memcat(&trace_buf, "  ...\n", 6);
				pt_trace_buf(buf, b_data(buf) - chunk, chunk);
			}
		}
	}
}
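
/* Releases the PT context and, if it is still attached to this mux, the
 * underlying connection: unsubscribes from pending xprt events, frees the
 * tasklet and the orphan stream endpoint descriptor, then fully closes and
 * frees the connection.
 */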
static void mux_pt_destroy(struct mux_pt_ctx *ctx)
{
	struct connection *conn = NULL;

	TRACE_POINT(PT_EV_CONN_END);

	/* The connection must be attached to this mux to be released */
	if (ctx->conn && ctx->conn->ctx == ctx)
		conn = ctx->conn;

	tasklet_free(ctx->wait_event.tasklet);

	if (conn && ctx->wait_event.events != 0)
		conn->xprt->unsubscribe(conn, conn->xprt_ctx, ctx->wait_event.events,
		                        &ctx->wait_event);

	BUG_ON(ctx->sd && !se_fl_test(ctx->sd, SE_FL_ORPHAN));
	sedesc_free(ctx->sd);
	pool_free(pool_head_pt_ctx, ctx);

	if (conn) {
		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", PT_EV_CONN_END, conn);

		conn_stop_tracking(conn);
		conn_full_close(conn);
		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}
}

/* Callback, used when we get I/Os while in idle mode. This one is exported so
 * that "show fd" can resolve it.
 */
struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status)
{
	struct mux_pt_ctx *ctx = tctx;

	TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn);
	if (!se_fl_test(ctx->sd, SE_FL_ORPHAN)) {
		/* There's a small race condition.
		 * mux_pt_io_cb() is only supposed to be called if we have no
		 * stream attached. However, maybe the tasklet got woken up,
		 * and this connection was then attached to a new stream.
		 * If this happened, just wake the tasklet up if anybody
		 * subscribed to receive events, and otherwise call the wake
		 * method, to make sure the event is noticed.
		 */
		if (ctx->conn->subs) {
			ctx->conn->subs->events = 0;
			tasklet_wakeup(ctx->conn->subs->tasklet);
			ctx->conn->subs = NULL;
		} else if (pt_sc(ctx)->app_ops->wake)
			pt_sc(ctx)->app_ops->wake(pt_sc(ctx));
		TRACE_DEVEL("leaving waking up SC", PT_EV_CONN_WAKE, ctx->conn);
		return t;
	}
	conn_ctrl_drain(ctx->conn);
	if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)) {
		TRACE_DEVEL("leaving destroying pt context", PT_EV_CONN_WAKE, ctx->conn);
		mux_pt_destroy(ctx);
		t = NULL;
	}
	else {
		ctx->conn->xprt->subscribe(ctx->conn, ctx->conn->xprt_ctx, SUB_RETRY_RECV,
		                           &ctx->wait_event);
		TRACE_DEVEL("leaving subscribing for reads", PT_EV_CONN_WAKE, ctx->conn);
	}

	return t;
}

/* Initialize the mux once it's attached. It is expected that conn->ctx points
 * to the existing stream connector (for outgoing connections) or NULL (for
 * incoming ones, in which case one will be allocated and a new stream will be
 * instantiated). Returns < 0 on error.
 */
static int mux_pt_init(struct connection *conn, struct proxy *prx, struct session *sess,
                       struct buffer *input)
{
	struct stconn *sc = conn->ctx;
	struct mux_pt_ctx *ctx = pool_alloc(pool_head_pt_ctx);

	TRACE_ENTER(PT_EV_CONN_NEW);

	if (!ctx) {
		TRACE_ERROR("PT context allocation failure", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
		goto fail;
	}

	ctx->wait_event.tasklet = tasklet_new();
	if (!ctx->wait_event.tasklet)
		goto fail_free_ctx;
	ctx->wait_event.tasklet->context = ctx;
	ctx->wait_event.tasklet->process = mux_pt_io_cb;
	ctx->wait_event.events = 0;
	ctx->conn = conn;

	if (!sc) {
		ctx->sd = sedesc_new();
		if (!ctx->sd) {
			TRACE_ERROR("SC allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
			goto fail_free_ctx;
		}
		ctx->sd->se = ctx;
		ctx->sd->conn = conn;
		se_fl_set(ctx->sd, SE_FL_T_MUX | SE_FL_ORPHAN);

		sc = sc_new_from_endp(ctx->sd, sess, input);
		if (!sc) {
			TRACE_ERROR("SC allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
			goto fail_free_sd;
		}
		TRACE_POINT(PT_EV_STRM_NEW, conn, sc);
	}
	else {
		if (sc_attach_mux(sc, ctx, conn) < 0)
			goto fail_free_ctx;
		ctx->sd = sc->sedesc;
	}
	conn->ctx = ctx;
	se_fl_set(ctx->sd, SE_FL_RCV_MORE);
	if ((global.tune.options & GTUNE_USE_SPLICE) && !(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_PT))
		se_fl_set(ctx->sd, SE_FL_MAY_FASTFWD_PROD | SE_FL_MAY_FASTFWD_CONS);

	TRACE_LEAVE(PT_EV_CONN_NEW, conn);
	return 0;

 fail_free_sd:
	sedesc_free(ctx->sd);
 fail_free_ctx:
	tasklet_free(ctx->wait_event.tasklet);
	pool_free(pool_head_pt_ctx, ctx);
 fail:
	TRACE_DEVEL("leaving in error", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
	return -1;
}

/* callback to be used by default for the pass-through mux. It calls the data
 * layer wake() callback if it is set, otherwise returns 0.
 */
static int mux_pt_wake(struct connection *conn)
{
	struct mux_pt_ctx *ctx = conn->ctx;
	int ret = 0;

	TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn);
	if (!se_fl_test(ctx->sd, SE_FL_ORPHAN)) {
		ret = pt_sc(ctx)->app_ops->wake ? pt_sc(ctx)->app_ops->wake(pt_sc(ctx)) : 0;

		if (ret < 0) {
			TRACE_DEVEL("leaving waking up SC", PT_EV_CONN_WAKE, ctx->conn);
			return ret;
		}
	} else {
		conn_ctrl_drain(conn);
		if (conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH)) {
			TRACE_DEVEL("leaving destroying PT context", PT_EV_CONN_WAKE, ctx->conn);
			mux_pt_destroy(ctx);
			return -1;
		}
	}

	/* If we had early data, and we're done with the handshake
	 * then we know the data are safe, and we can remove the flag.
	 */
	if ((conn->flags & (CO_FL_EARLY_DATA | CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT)) ==
	    CO_FL_EARLY_DATA)
		conn->flags &= ~CO_FL_EARLY_DATA;

	TRACE_LEAVE(PT_EV_CONN_WAKE, ctx->conn);
	return ret;
}

/*
 * Attach a new stream to a connection
 * (Used for outgoing connections)
 */
static int mux_pt_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
{
	struct mux_pt_ctx *ctx = conn->ctx;

	TRACE_ENTER(PT_EV_STRM_NEW, conn);
	if (ctx->wait_event.events)
		conn->xprt->unsubscribe(ctx->conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
	if (sc_attach_mux(sd->sc, ctx, conn) < 0)
		return -1;
	ctx->sd = sd;
	se_fl_set(ctx->sd, SE_FL_RCV_MORE);
	if ((global.tune.options & GTUNE_USE_SPLICE) && !(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_PT))
		se_fl_set(ctx->sd, SE_FL_MAY_FASTFWD_PROD | SE_FL_MAY_FASTFWD_CONS);

	TRACE_LEAVE(PT_EV_STRM_NEW, conn, sd->sc);
	return 0;
}

/* Retrieves a valid stream connector from this connection, or returns NULL.
 * For this mux, it's easy as we can only store a single stream connector.
 */
static struct stconn *mux_pt_get_first_sc(const struct connection *conn)
{
	struct mux_pt_ctx *ctx = conn->ctx;

	return pt_sc(ctx);
}

/* Destroy the mux and the associated connection if still attached to this mux
 * and no longer used */
static void mux_pt_destroy_meth(void *ctx)
{
	struct mux_pt_ctx *pt = ctx;

	TRACE_POINT(PT_EV_CONN_END, pt->conn, pt_sc(pt));
	if (se_fl_test(pt->sd, SE_FL_ORPHAN) || pt->conn->ctx != pt) {
		if (pt->conn->ctx != pt) {
			pt->sd = NULL;
		}
		mux_pt_destroy(pt);
	}
}

/*
 * Detach the stream from the connection and possibly release the connection.
 */
static void mux_pt_detach(struct sedesc *sd)
{
	struct connection *conn = sd->conn;
	struct mux_pt_ctx *ctx;

	TRACE_ENTER(PT_EV_STRM_END, conn, sd->sc);

	ctx = conn->ctx;

	/* Subscribe, to know if we got disconnected */
	if (!conn_is_back(conn) && conn->owner != NULL &&
	    !(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
	} else {
		/* There's no session attached to that connection, destroy it */
		TRACE_DEVEL("killing dead connection", PT_EV_STRM_END, conn, sd->sc);
		mux_pt_destroy(ctx);
	}

	TRACE_LEAVE(PT_EV_STRM_END);
}

/* returns the number of streams in use on a connection */
static int mux_pt_used_streams(struct connection *conn)
{
	struct mux_pt_ctx *ctx = conn->ctx;

	return (!se_fl_test(ctx->sd, SE_FL_ORPHAN) ? 1 : 0);
}

/* returns the number of streams still available on a connection */
static int mux_pt_avail_streams(struct connection *conn)
{
	return 1 - mux_pt_used_streams(conn);
}
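
/* Performs a shutdown on the connection for the requested <mode>: shuts the
 * write side via the xprt and/or the socket, and/or shuts the read side,
 * possibly draining pending data first.
 */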
static void mux_pt_shut(struct stconn *sc, unsigned int mode, struct se_abort_info *reason)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;

	TRACE_ENTER(PT_EV_STRM_SHUT, conn, sc);
	if (mode & (SE_SHW_SILENT|SE_SHW_NORMAL)) {
		if (conn_xprt_ready(conn) && conn->xprt->shutw)
			conn->xprt->shutw(conn, conn->xprt_ctx, (mode & SE_SHW_NORMAL));
		if (!(conn->flags & CO_FL_SOCK_WR_SH))
			conn_sock_shutw(conn, (mode & SE_SHW_NORMAL));
	}

	if (mode & (SE_SHR_RESET|SE_SHR_DRAIN)) {
		se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		if (conn_xprt_ready(conn) && conn->xprt->shutr)
			conn->xprt->shutr(conn, conn->xprt_ctx, (mode & SE_SHR_DRAIN));
		else if (mode & SE_SHR_DRAIN)
			conn_ctrl_drain(conn);
	}

	TRACE_LEAVE(PT_EV_STRM_SHUT, conn, sc);
}

/*
 * Called from the upper layer, to get more data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 */
static size_t mux_pt_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;
	size_t ret = 0;

	TRACE_ENTER(PT_EV_RX_DATA, conn, sc, buf, (size_t[]){count});

	if (!count) {
		se_fl_set(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		goto end;
	}
	b_realign_if_empty(buf);
	ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags);
	if (conn->flags & CO_FL_ERROR) {
		se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		if (conn_xprt_read0_pending(conn))
			se_fl_set(ctx->sd, SE_FL_EOS);
		se_fl_set(ctx->sd, SE_FL_ERROR);
		TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, sc);
	}
	else if (conn_xprt_read0_pending(conn)) {
		se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		se_fl_set(ctx->sd, (SE_FL_EOI|SE_FL_EOS));
		TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, sc);
	}
  end:
	TRACE_LEAVE(PT_EV_RX_DATA, conn, sc, buf, (size_t[]){ret});
	return ret;
}

/* Called from the upper layer, to send data */
static size_t mux_pt_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;
	size_t ret;

	TRACE_ENTER(PT_EV_TX_DATA, conn, sc, buf, (size_t[]){count});

	ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, count, flags);

	if (ret > 0)
		b_del(buf, ret);

	if (conn->flags & CO_FL_ERROR) {
		if (conn_xprt_read0_pending(conn))
			se_fl_set(ctx->sd, SE_FL_EOS);
		se_fl_set_error(ctx->sd);
		TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, sc);
	}

	TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, buf, (size_t[]){ret});
	return ret;
}
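
/* Returns the stream endpoint descriptor of the opposite side by following
 * the xref peer of this mux's sedesc, or NULL if there is no opposite
 * endpoint yet.
 */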
static inline struct sedesc *mux_pt_opposite_sd(struct mux_pt_ctx *ctx)
{
	struct xref *peer;
	struct sedesc *sdo;

	peer = xref_get_peer_and_lock(&ctx->sd->xref);
	if (!peer)
		return NULL;

	sdo = container_of(peer, struct sedesc, xref);
	xref_unlock(&ctx->sd->xref, peer);
	return sdo;
}
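
/* Negotiates zero-copy fast-forwarding with the sender. When splicing is
 * permitted and a pipe can be obtained, the full <count> is accepted and the
 * data will go through the pipe; otherwise splicing is disabled on the iobuf
 * and the caller falls back to buffered forwarding.
 */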
static size_t mux_pt_nego_ff(struct stconn *sc, struct buffer *input, size_t count, unsigned int flags)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;
	size_t ret = 0;

	TRACE_ENTER(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){count});

	/* Use kernel splicing if it is supported by the sender and if there
	 * are no input data _AND_ no output data.
	 *
	 * TODO: It may be good to add a flag to send obuf data first if any,
	 *       and then data in pipe, or the opposite. For now, it is not
	 *       supported to mix data.
	 */
	if (!b_data(input) && (flags & NEGO_FF_FL_MAY_SPLICE)) {
		if (conn->xprt->snd_pipe && (ctx->sd->iobuf.pipe || (pipes_used < global.maxpipes && (ctx->sd->iobuf.pipe = get_pipe())))) {
			ctx->sd->iobuf.offset = 0;
			ctx->sd->iobuf.data = 0;
			ret = count;
			goto out;
		}
		ctx->sd->iobuf.flags |= IOBUF_FL_NO_SPLICING;
		TRACE_DEVEL("Unable to allocate pipe for splicing, fallback to buffer", PT_EV_TX_DATA, conn, sc);
	}

	/* No buffer case */

  out:
	TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){ret});
	return ret;
}
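
/* Completes a fast-forward operation by sending the spliced data over the
 * xprt, releasing the pipe once it is empty or on connection error. Returns
 * the number of bytes sent.
 */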
static size_t mux_pt_done_ff(struct stconn *sc)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;
	struct sedesc *sd = ctx->sd;
	size_t total = 0;

	TRACE_ENTER(PT_EV_TX_DATA, conn, sc);

	if (sd->iobuf.pipe) {
		total = conn->xprt->snd_pipe(conn, conn->xprt_ctx, sd->iobuf.pipe, sd->iobuf.pipe->data);
		if (!sd->iobuf.pipe->data) {
			put_pipe(sd->iobuf.pipe);
			sd->iobuf.pipe = NULL;
		}
	}
	else {
		BUG_ON(sd->iobuf.buf);
	}

  out:
	if (conn->flags & CO_FL_ERROR) {
		if (conn_xprt_read0_pending(conn))
			se_fl_set(ctx->sd, SE_FL_EOS);
		se_fl_set_error(ctx->sd);
		if (sd->iobuf.pipe) {
			put_pipe(sd->iobuf.pipe);
			sd->iobuf.pipe = NULL;
		}
		TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, sc);
	}

	TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){total});
	return total;
}
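
/* Receive path of zero-copy fast-forwarding: negotiates with the opposite
 * endpoint and, when splicing is possible, reads directly from the xprt into
 * the opposite side's pipe. Returns the number of bytes forwarded, 0 if
 * nothing could be forwarded yet, or a negative value on error or to fall
 * back to buffered mode.
 */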
static int mux_pt_fastfwd(struct stconn *sc, unsigned int count, unsigned int flags)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;
	struct sedesc *sdo = NULL;
	size_t total = 0, try = 0;
	unsigned int nego_flags = NEGO_FF_FL_NONE;
	int ret = 0;

	TRACE_ENTER(PT_EV_RX_DATA, conn, sc, 0, (size_t[]){count});

	se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
	conn->flags &= ~CO_FL_WAIT_ROOM;
	sdo = mux_pt_opposite_sd(ctx);
	if (!sdo) {
		TRACE_STATE("Opposite endpoint not available yet", PT_EV_RX_DATA, conn, sc);
		goto out;
	}

	if (conn->xprt->rcv_pipe && !!(flags & CO_RFL_MAY_SPLICE) && !(sdo->iobuf.flags & IOBUF_FL_NO_SPLICING))
		nego_flags |= NEGO_FF_FL_MAY_SPLICE;

	try = se_nego_ff(sdo, &BUF_NULL, count, nego_flags);
	if (sdo->iobuf.flags & IOBUF_FL_NO_FF) {
		/* Fast forwarding is not supported by the consumer */
		se_fl_clr(ctx->sd, SE_FL_MAY_FASTFWD_PROD);
		TRACE_DEVEL("Fast-forwarding not supported by opposite endpoint, disable it", PT_EV_RX_DATA, conn, sc);
		goto end;
	}
	if (sdo->iobuf.flags & IOBUF_FL_FF_BLOCKED) {
		se_fl_set(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		TRACE_STATE("waiting for more room", PT_EV_RX_DATA|PT_EV_STRM_ERR, conn, sc);
		goto out;
	}

	total += sdo->iobuf.data;

	if (sdo->iobuf.pipe) {
		/* Here, no data was xferred yet */
		ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, sdo->iobuf.pipe, try);
		if (ret < 0) {
			TRACE_ERROR("Error when trying to fast-forward data, disable it and abort",
			            PT_EV_RX_DATA|PT_EV_STRM_ERR|PT_EV_CONN_ERR, conn, sc);
			se_fl_clr(ctx->sd, SE_FL_MAY_FASTFWD_PROD);
			BUG_ON(sdo->iobuf.pipe->data);
			put_pipe(sdo->iobuf.pipe);
			sdo->iobuf.pipe = NULL;
			goto end;
		}
		total += ret;
	}
	else {
		BUG_ON(sdo->iobuf.buf);
		ret = -1; /* abort splicing for now and fallback to buffer mode */
		goto end;
	}

	ret = total;
	se_done_ff(sdo);

	if (sdo->iobuf.pipe) {
		se_fl_set(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
	}

	TRACE_DEVEL("Data fast-forwarded", PT_EV_RX_DATA, conn, sc, 0, (size_t[]){ret});

  out:
	if (conn->flags & CO_FL_ERROR) {
		if (conn_xprt_read0_pending(conn))
			se_fl_set(ctx->sd, SE_FL_EOS);
		se_fl_set(ctx->sd, SE_FL_ERROR);
		TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, sc);
	}
	else if (conn_xprt_read0_pending(conn)) {
		se_fl_set(ctx->sd, (SE_FL_EOS|SE_FL_EOI));
		TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, sc);
	}
  end:
	TRACE_LEAVE(PT_EV_RX_DATA, conn, sc, 0, (size_t[]){ret});
	return ret;
}

/* Resumes fast-forwarding on the send side: flushes any spliced data still
 * pending in the pipe to the transport. Returns the number of bytes sent.
 */
static int mux_pt_resume_fastfwd(struct stconn *sc, unsigned int flags)
{
	struct connection *conn = __sc_conn(sc);
	struct mux_pt_ctx *ctx = conn->ctx;
	struct sedesc *sd = ctx->sd;
	size_t total = 0;

	TRACE_ENTER(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){flags});

	if (sd->iobuf.pipe) {
		total = conn->xprt->snd_pipe(conn, conn->xprt_ctx, sd->iobuf.pipe, sd->iobuf.pipe->data);
		if (!sd->iobuf.pipe->data) {
			put_pipe(sd->iobuf.pipe);
			sd->iobuf.pipe = NULL;
		}
	}
	else {
		BUG_ON(sd->iobuf.buf);
	}

  out:
	if (conn->flags & CO_FL_ERROR) {
		if (conn_xprt_read0_pending(conn))
			se_fl_set(ctx->sd, SE_FL_EOS);
		se_fl_set_error(ctx->sd);

		/* As above: on error, release any pending pipe so the channel
		 * does not appear non-empty and block the stream's release.
		 */
		if (sd->iobuf.pipe) {
			put_pipe(sd->iobuf.pipe);
			sd->iobuf.pipe = NULL;
		}
		TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, sc);
	}

	TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){total});
	return total;
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int mux_pt_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
{
	struct connection *conn = __sc_conn(sc);

	TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, sc, 0, (size_t[]){event_type});
	return conn->xprt->subscribe(conn, conn->xprt_ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int mux_pt_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
{
	struct connection *conn = __sc_conn(sc);

	TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, sc, 0, (size_t[]){event_type});
	return conn->xprt->unsubscribe(conn, conn->xprt_ctx, event_type, es);
}
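
/* Illustrative sketch only (not part of this file's API): an upper layer
 * typically owns a struct wait_event (a tasklet plus an events mask) and
 * re-subscribes when an I/O attempt could not complete. Assuming a stream
 * connector <sc> carrying such a wait_event, the pattern roughly looks like:
 *
 *	if (!(sc->wait_event.events & SUB_RETRY_RECV))
 *		conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
 *
 * The exact same <es> pointer must later be passed to unsubscribe() before
 * the subscriber goes away, as required by the comments above.
 */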

static int mux_pt_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
{
	int ret = 0;

	switch (mux_ctl) {
	case MUX_CTL_STATUS:
		if (!(conn->flags & CO_FL_WAIT_XPRT))
			ret |= MUX_STATUS_READY;
		return ret;
	case MUX_CTL_EXIT_STATUS:
		return MUX_ES_UNKNOWN;
	case MUX_CTL_GET_NBSTRM:
		return mux_pt_used_streams(conn);
	case MUX_CTL_GET_MAXSTRM:
		return 1;
	default:
		return -1;
	}
}
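
/* Illustrative sketch only: these queries go through the generic
 * conn->mux->ctl() entry point. Checking whether the mux considers the
 * connection usable could look roughly like this:
 *
 *	if (conn->mux->ctl(conn, MUX_CTL_STATUS, NULL) & MUX_STATUS_READY)
 *		... transport handshake is over, connection is ready ...
 *
 * For the pass-through mux, MUX_CTL_GET_NBSTRM reports 0 or 1 and
 * MUX_CTL_GET_MAXSTRM is always 1, since a connection carries a single
 * stream.
 */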

static int mux_pt_sctl(struct stconn *sc, enum mux_sctl_type mux_sctl, void *output)
{
	int ret = 0;

	switch (mux_sctl) {
	case MUX_SCTL_SID:
		if (output)
			*((int64_t *)output) = 0;
		return ret;
	default:
		return -1;
	}
}
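
/* Illustrative sketch only: sctl() is the per-stream counterpart of ctl().
 * Retrieving the stream identifier could look roughly like this; the
 * pass-through mux always reports it as 0:
 *
 *	int64_t sid = -1;
 *
 *	if (conn->mux->sctl(sc, MUX_SCTL_SID, &sid) == 0)
 *		... <sid> now holds the mux stream ID (0 here) ...
 */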

/* config parser for global "tune.pt.zero-copy-forwarding" */
static int cfg_parse_pt_zero_copy_fwd(char **args, int section_type, struct proxy *curpx,
				      const struct proxy *defpx, const char *file, int line,
				      char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;

	if (strcmp(args[1], "on") == 0)
		global.tune.no_zero_copy_fwd &= ~NO_ZERO_COPY_FWD_PT;
	else if (strcmp(args[1], "off") == 0)
		global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD_PT;
	else {
		memprintf(err, "'%s' expects 'on' or 'off'.", args[0]);
		return -1;
	}
	return 0;
}

/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.pt.zero-copy-forwarding", cfg_parse_pt_zero_copy_fwd },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
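
/* Example usage of the keyword registered above, in the global section of a
 * configuration (any value other than "on"/"off" is rejected by the parser):
 *
 *	global
 *		tune.pt.zero-copy-forwarding off
 *
 * "on" clears NO_ZERO_COPY_FWD_PT and lets the pass-through mux use the
 * zero-copy fast-forwarding path; "off" sets the flag and forces the regular
 * buffered path.
 */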

/* The mux operations */
const struct mux_ops mux_tcp_ops = {
	.init = mux_pt_init,
	.wake = mux_pt_wake,
	.rcv_buf = mux_pt_rcv_buf,
	.snd_buf = mux_pt_snd_buf,
	.nego_fastfwd = mux_pt_nego_ff,
	.done_fastfwd = mux_pt_done_ff,
	.fastfwd = mux_pt_fastfwd,
	.resume_fastfwd = mux_pt_resume_fastfwd,
	.subscribe = mux_pt_subscribe,
	.unsubscribe = mux_pt_unsubscribe,
	.attach = mux_pt_attach,
	.get_first_sc = mux_pt_get_first_sc,
	.detach = mux_pt_detach,
	.avail_streams = mux_pt_avail_streams,
	.used_streams = mux_pt_used_streams,
	.destroy = mux_pt_destroy_meth,
	.ctl = mux_pt_ctl,
	.sctl = mux_pt_sctl,
	.shut = mux_pt_shut,
	.flags = MX_FL_NONE,
	.name = "PASS",
};

const struct mux_ops mux_pt_ops = {
	.init = mux_pt_init,
	.wake = mux_pt_wake,
	.rcv_buf = mux_pt_rcv_buf,
	.snd_buf = mux_pt_snd_buf,
	.nego_fastfwd = mux_pt_nego_ff,
	.done_fastfwd = mux_pt_done_ff,
	.fastfwd = mux_pt_fastfwd,
	.resume_fastfwd = mux_pt_resume_fastfwd,
	.subscribe = mux_pt_subscribe,
	.unsubscribe = mux_pt_unsubscribe,
	.attach = mux_pt_attach,
	.get_first_sc = mux_pt_get_first_sc,
	.detach = mux_pt_detach,
	.avail_streams = mux_pt_avail_streams,
	.used_streams = mux_pt_used_streams,
	.destroy = mux_pt_destroy_meth,
	.ctl = mux_pt_ctl,
	.sctl = mux_pt_sctl,
	.shut = mux_pt_shut,
	.flags = MX_FL_NONE|MX_FL_NO_UPG,
	.name = "PASS",
};

/* PROT selection: default mux has empty name */
static struct mux_proto_list mux_proto_none =
	{ .token = IST("none"), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_pt_ops };
static struct mux_proto_list mux_proto_tcp =
	{ .token = IST(""), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_tcp_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_none);
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_tcp);
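
/* Illustrative sketch only: with the registrations above, a TCP proxy that
 * does not specify any "proto" keyword gets the default entry (empty token,
 * mux_tcp_ops), while "proto none" explicitly selects the non-upgradable
 * pass-through variant. A hypothetical configuration could look like:
 *
 *	listen tcp-fwd
 *		mode tcp
 *		bind :4000 proto none
 *		server s1 192.0.2.10:4000
 */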