MINOR: quic: Replace MT_LISTs by LISTs for RX packets.

Replace the MT_LIST used for the ->rx.pqpkts member of the quic_enc_level
struct with a plain LIST. Do the same for the ->list member of the
quic_rx_packet struct. Update the code accordingly. These MT_LISTs were a
leftover from the multithreading support (several threads per connection).

Must be backported to 2.6

(cherry picked from commit a2d8ad20a3)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
Author:       Frédéric Lécaille
Date:         2022-08-23 17:45:52 +02:00
Committed by: Christopher Faulet
Parent:       4109605dc9
Commit:       efa5ee3a4e

2 changed files with 16 additions and 18 deletions
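To make the mechanical change easier to follow, here is a minimal sketch (not part of the patch) of the before/after iteration pattern, assuming haproxy's list macros from <haproxy/list.h>. The rx_pkt struct and drop_pqpkts() helper are hypothetical stand-ins for quic_rx_packet and qc_rm_hp_pkts(); only the LIST_*/list_for_each_entry_safe macros come from haproxy itself.

/* Hypothetical illustration, not from the patch: the same "iterate and
 * unlink" loop written against both list flavours.
 */
#include <haproxy/list.h>

struct rx_pkt {
        struct list list;      /* was: struct mt_list list; */
        int refcnt;
};

/* Before (MT_LIST): two temporary cursors, deletion through the cursor:
 *
 *      struct mt_list *elt1, elt2;
 *      mt_list_for_each_entry_safe(pkt, &head, list, elt1, elt2) {
 *              MT_LIST_DELETE_SAFE(elt1);
 *              pkt->refcnt--;
 *      }
 */

/* After (LIST): one backup entry pointer, deletion through the member. */
static void drop_pqpkts(struct list *head)
{
        struct rx_pkt *pkt, *tmp;

        list_for_each_entry_safe(pkt, tmp, head, list) {
                LIST_DELETE(&pkt->list);
                /* drop the reference taken when the packet was queued */
                pkt->refcnt--;
        }
}

The LIST variant drops the two mt_list cursors and their concurrent-access semantics: the entry itself is the second loop variable and is unlinked directly through its list member.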


@@ -390,7 +390,7 @@ struct quic_dgram {
 #define QUIC_FL_RX_PACKET_ACK_ELICITING (1UL << 0)
 struct quic_rx_packet {
-        struct mt_list list;
+        struct list list;
         struct list qc_rx_pkt_list;
         struct quic_conn *qc;
         unsigned char type;
@@ -520,7 +520,7 @@ struct quic_enc_level {
                 /* <pkts> root must be protected from concurrent accesses */
                 __decl_thread(HA_RWLOCK_T pkts_rwlock);
                 /* Liste of QUIC packets with protected header. */
-                struct mt_list pqpkts;
+                struct list pqpkts;
                 /* Crypto frames */
                 struct {
                         uint64_t offset;


@@ -3506,8 +3506,7 @@ int quic_update_ack_ranges_list(struct quic_arngs *arngs,
 static inline void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el)
 {
         struct quic_tls_ctx *tls_ctx;
-        struct quic_rx_packet *pqpkt;
-        struct mt_list *pkttmp1, pkttmp2;
+        struct quic_rx_packet *pqpkt, *pkttmp;
         struct quic_enc_level *app_qel;
         TRACE_ENTER(QUIC_EV_CONN_ELRMHP, qc);
@@ -3519,7 +3518,7 @@ static inline void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el
                 goto out;
         }
         tls_ctx = &el->tls_ctx;
-        mt_list_for_each_entry_safe(pqpkt, &el->rx.pqpkts, list, pkttmp1, pkttmp2) {
+        list_for_each_entry_safe(pqpkt, pkttmp, &el->rx.pqpkts, list) {
                 if (!qc_do_rm_hp(qc, pqpkt, tls_ctx, el->pktns->rx.largest_pn,
                                  pqpkt->data + pqpkt->pn_offset, pqpkt->data)) {
                         TRACE_PROTO("hp removing error", QUIC_EV_CONN_ELRMHP, qc);
@@ -3536,7 +3535,7 @@ static inline void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el
                         HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
                         TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, qc, pqpkt);
                 }
-                MT_LIST_DELETE_SAFE(pkttmp1);
+                LIST_DELETE(&pqpkt->list);
                 quic_rx_packet_refdec(pqpkt);
         }
@@ -3865,7 +3864,7 @@ static struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned
                 qc_dgrams_retransmit(qc);
         }
-        if (!MT_LIST_ISEMPTY(&qel->rx.pqpkts) && qc_qel_may_rm_hp(qc, qel))
+        if (!LIST_ISEMPTY(&qel->rx.pqpkts) && qc_qel_may_rm_hp(qc, qel))
                 qc_rm_hp_pkts(qc, qel);
         if (!qc_treat_rx_pkts(qel, NULL, ctx, 0))
@@ -3922,7 +3921,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
         }
         ssl_err = SSL_ERROR_NONE;
         zero_rtt = st < QUIC_HS_ST_COMPLETE &&
-                   (!MT_LIST_ISEMPTY(&qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA].rx.pqpkts) ||
+                   (!LIST_ISEMPTY(&qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA].rx.pqpkts) ||
                    qc_el_rx_pkts(&qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA]));
 start:
         if (st >= QUIC_HS_ST_COMPLETE &&
@@ -3940,7 +3939,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
 next_level:
         /* Treat packets waiting for header packet protection decryption */
-        if (!MT_LIST_ISEMPTY(&qel->rx.pqpkts) && qc_qel_may_rm_hp(qc, qel))
+        if (!LIST_ISEMPTY(&qel->rx.pqpkts) && qc_qel_may_rm_hp(qc, qel))
                 qc_rm_hp_pkts(qc, qel);
         force_ack = qel == &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL] ||
@@ -3953,21 +3952,20 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
                 goto out;
         if (next_qel && next_qel == &qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA] &&
-            !MT_LIST_ISEMPTY(&next_qel->rx.pqpkts)) {
+            !LIST_ISEMPTY(&next_qel->rx.pqpkts)) {
                 if ((next_qel->tls_ctx.flags & QUIC_FL_TLS_SECRETS_SET)) {
                         qel = next_qel;
                         next_qel = NULL;
                         goto next_level;
                 }
                 else {
-                        struct quic_rx_packet *pkt;
-                        struct mt_list *elt1, elt2;
+                        struct quic_rx_packet *pkt, *pkttmp;
                         struct quic_enc_level *aqel = &qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA];
                         /* Drop these 0-RTT packets */
-                        TRACE_PROTO("drop all 0-RTT packets", QUIC_EV_CONN_PHPKTS, qc);
-                        mt_list_for_each_entry_safe(pkt, &aqel->rx.pqpkts, list, elt1, elt2) {
-                                MT_LIST_DELETE_SAFE(elt1);
+                        TRACE_DEVEL("drop all 0-RTT packets", QUIC_EV_CONN_PHPKTS, qc);
+                        list_for_each_entry_safe(pkt, pkttmp, &aqel->rx.pqpkts, list) {
+                                LIST_DELETE(&pkt->list);
                                 quic_rx_packet_refdec(pkt);
                         }
                 }
@@ -4018,7 +4016,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
          */
         if (next_qel && next_qel != qel &&
             (next_qel->tls_ctx.flags & QUIC_FL_TLS_SECRETS_SET) &&
-            (!MT_LIST_ISEMPTY(&next_qel->rx.pqpkts) || qc_el_rx_pkts(next_qel))) {
+            (!LIST_ISEMPTY(&next_qel->rx.pqpkts) || qc_el_rx_pkts(next_qel))) {
                 qel = next_qel;
                 next_qel = NULL;
                 goto next_level;
@@ -4069,7 +4067,7 @@ static int quic_conn_enc_level_init(struct quic_conn *qc,
         qel->rx.pkts = EB_ROOT;
         HA_RWLOCK_INIT(&qel->rx.pkts_rwlock);
-        MT_LIST_INIT(&qel->rx.pqpkts);
+        LIST_INIT(&qel->rx.pqpkts);
         qel->rx.crypto.offset = 0;
         qel->rx.crypto.frms = EB_ROOT_UNIQUE;
@@ -4680,7 +4678,7 @@ static inline int qc_try_rm_hp(struct quic_conn *qc,
                 TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, qc, pkt);
                 pkt->pn_offset = pn - beg;
-                MT_LIST_APPEND(&qel->rx.pqpkts, &pkt->list);
+                LIST_APPEND(&qel->rx.pqpkts, &pkt->list);
                 quic_rx_packet_refinc(pkt);
         }
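For completeness, a sketch of the producer side of the same queue after the change, again purely illustrative: init_pqpkts(), queue_pkt() and has_pending_pkts() are made-up helpers reusing the hypothetical rx_pkt struct from the sketch above, while LIST_INIT(), LIST_APPEND() and LIST_ISEMPTY() are the haproxy macros used by quic_conn_enc_level_init(), qc_try_rm_hp() and the io_cb callbacks in the hunks above.

/* Illustrative only: how a plain-list pqpkts head is set up and fed. */
static void init_pqpkts(struct list *head)
{
        LIST_INIT(head);                  /* as in quic_conn_enc_level_init() */
}

static void queue_pkt(struct list *head, struct rx_pkt *pkt)
{
        LIST_APPEND(head, &pkt->list);    /* as in qc_try_rm_hp() */
        pkt->refcnt++;                    /* keep the packet alive while queued */
}

static int has_pending_pkts(struct list *head)
{
        return !LIST_ISEMPTY(head);       /* as tested in the io_cb callbacks */
}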