diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h
index cae12bec6..888efca97 100644
--- a/include/haproxy/quic_cc-t.h
+++ b/include/haproxy/quic_cc-t.h
@@ -30,6 +30,7 @@
 #include /* size_t */
 #include
+#include

 #define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)

@@ -86,6 +87,30 @@ struct quic_cc {
 	uint32_t priv[16];
 };

+struct quic_cc_path {
+	/* Control congestion. */
+	struct quic_cc cc;
+	/* Packet loss detection information. */
+	struct quic_loss loss;
+
+	/* MTU. */
+	size_t mtu;
+	/* Congestion window. */
+	uint64_t cwnd;
+	/* The current maximum congestion window value reached. */
+	uint64_t mcwnd;
+	/* The maximum congestion window value which can be reached. */
+	uint64_t max_cwnd;
+	/* Minimum congestion window. */
+	uint64_t min_cwnd;
+	/* Prepared data to be sent (in bytes). */
+	uint64_t prep_in_flight;
+	/* Outstanding data (in bytes). */
+	uint64_t in_flight;
+	/* Number of in flight ack-eliciting packets. */
+	uint64_t ifae_pkts;
+};
+
 struct quic_cc_algo {
 	enum quic_cc_algo_type type;
 	int (*init)(struct quic_cc *cc);
diff --git a/include/haproxy/quic_cc.h b/include/haproxy/quic_cc.h
index 6e3c7d522..721feca78 100644
--- a/include/haproxy/quic_cc.h
+++ b/include/haproxy/quic_cc.h
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include

 void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
 void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
@@ -73,5 +74,39 @@ static inline void *quic_cc_priv(const struct quic_cc *cc)
 	return (void *)cc->priv;
 }

+/* Initialize <path> QUIC network path depending on <ipv4> boolean
+ * which is true for an IPv4 path, false for an IPv6 path.
+ */
+static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsigned long max_cwnd,
+                                     struct quic_cc_algo *algo, struct quic_conn *qc)
+{
+	unsigned int max_dgram_sz;
+
+	max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
+	quic_loss_init(&path->loss);
+	path->mtu = max_dgram_sz;
+	path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
+	path->mcwnd = path->cwnd;
+	path->max_cwnd = max_cwnd;
+	path->min_cwnd = max_dgram_sz << 1;
+	path->prep_in_flight = 0;
+	path->in_flight = 0;
+	path->ifae_pkts = 0;
+	quic_cc_init(&path->cc, algo, qc);
+}
+
+/* Return the remaining room available on QUIC path <path> for prepared data
+ * (before being sent). Almost the same as the QUIC path room, except that
+ * here the data which have already been prepared are taken into account.
+ */
+static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
+{
+	if (path->prep_in_flight > path->cwnd)
+		return 0;
+
+	return path->cwnd - path->prep_in_flight;
+}
+
+
 #endif /* USE_QUIC */
 #endif /* _PROTO_QUIC_CC_H */
diff --git a/include/haproxy/quic_conn-t.h b/include/haproxy/quic_conn-t.h
index 087dfd693..9a7e64dba 100644
--- a/include/haproxy/quic_conn-t.h
+++ b/include/haproxy/quic_conn-t.h
@@ -235,30 +235,6 @@ extern const struct quic_version *preferred_version;
 /* The maximum number of bytes of CRYPTO data in flight during handshakes. */
 #define QUIC_CRYPTO_IN_FLIGHT_MAX 4096

-struct quic_path {
-	/* Control congestion. */
-	struct quic_cc cc;
-	/* Packet loss detection information. */
-	struct quic_loss loss;
-
-	/* MTU. */
-	size_t mtu;
-	/* Congestion window. */
-	uint64_t cwnd;
-	/* The current maximum congestion window value reached. */
-	uint64_t mcwnd;
-	/* The maximum congestion window value which can be reached. */
-	uint64_t max_cwnd;
-	/* Minimum congestion window. */
-	uint64_t min_cwnd;
-	/* Prepared data to be sent (in bytes). */
-	uint64_t prep_in_flight;
-	/* Outstanding data (in bytes). */
-	uint64_t in_flight;
-	/* Number of in flight ack-eliciting packets. */
-	uint64_t ifae_pkts;
-};
-
 /* Status of the connection/mux layer. This defines how to handle app data.
  *
  * During a standard quic_conn lifetime it transitions like this :
@@ -433,8 +409,8 @@ struct quic_conn {
 	} ku;
 	unsigned int max_ack_delay;
 	unsigned int max_idle_timeout;
-	struct quic_path paths[1];
-	struct quic_path *path;
+	struct quic_cc_path paths[1];
+	struct quic_cc_path *path;

 	struct mt_list accept_list; /* chaining element used for accept, only valid for frontend connections */

diff --git a/include/haproxy/quic_conn.h b/include/haproxy/quic_conn.h
index 725633f3d..3ef4febcb 100644
--- a/include/haproxy/quic_conn.h
+++ b/include/haproxy/quic_conn.h
@@ -146,39 +146,6 @@ static inline void quic_connection_id_to_frm_cpy(struct quic_frame *dst,
 	ncid_frm->stateless_reset_token = src->stateless_reset_token;
 }

-/* Initialize QUIC network path depending on boolean
- * which is true for an IPv4 path, if not false for an IPv6 path.
- */
-static inline void quic_path_init(struct quic_path *path, int ipv4, unsigned long max_cwnd,
-                                  struct quic_cc_algo *algo, struct quic_conn *qc)
-{
-	unsigned int max_dgram_sz;
-
-	max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
-	quic_loss_init(&path->loss);
-	path->mtu = max_dgram_sz;
-	path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
-	path->mcwnd = path->cwnd;
-	path->max_cwnd = max_cwnd;
-	path->min_cwnd = max_dgram_sz << 1;
-	path->prep_in_flight = 0;
-	path->in_flight = 0;
-	path->ifae_pkts = 0;
-	quic_cc_init(&path->cc, algo, qc);
-}
-
-/* Return the remaining available on QUIC path for prepared data
- * (before being sent). Almost the same that for the QUIC path room, except that
- * here this is the data which have been prepared which are taken into an account.
- */
-static inline size_t quic_path_prep_data(struct quic_path *path)
-{
-	if (path->prep_in_flight > path->cwnd)
-		return 0;
-
-	return path->cwnd - path->prep_in_flight;
-}
-
 /* Return 1 if header form is long, 0 if not. */
 static inline int qc_pkt_long(const struct quic_rx_packet *pkt)
 {
diff --git a/src/quic_cc_cubic.c b/src/quic_cc_cubic.c
index 2e5599d75..bd14b37c8 100644
--- a/src/quic_cc_cubic.c
+++ b/src/quic_cc_cubic.c
@@ -97,7 +97,7 @@ static uint32_t cubic_root(uint64_t val)
 static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
 {
 	struct cubic *c = quic_cc_priv(cc);
-	struct quic_path *path = container_of(cc, struct quic_path, cc);
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
 	/* Current cwnd as number of packets */
 	uint32_t t, target, inc, inc_diff;
 	uint64_t delta, diff;
@@ -183,7 +183,7 @@ static void quic_cc_cubic_slow_start(struct quic_cc *cc)

 static void quic_enter_recovery(struct quic_cc *cc)
 {
-	struct quic_path *path = container_of(cc, struct quic_path, cc);
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
 	struct cubic *c = quic_cc_priv(cc);
 	/* Current cwnd as number of packets */

@@ -207,7 +207,7 @@ static void quic_enter_recovery(struct quic_cc *cc)
 /* Congestion slow-start callback. */
 static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path = container_of(cc, struct quic_path, cc);
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
 	struct cubic *c = quic_cc_priv(cc);

 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
@@ -310,10 +310,10 @@ static void quic_cc_cubic_event(struct quic_cc *cc, struct quic_cc_event *ev)

 static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct cubic *c = quic_cc_priv(cc);

-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
 	              quic_cc_state_str(c->state),
 	              (unsigned long long)path->cwnd,
diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c
index 7756a6119..405b0babc 100644
--- a/src/quic_cc_newreno.c
+++ b/src/quic_cc_newreno.c
@@ -51,10 +51,10 @@ static int quic_cc_nr_init(struct quic_cc *cc)
 /* Re-enter slow start state. */
 static void quic_cc_nr_slow_start(struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);

-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	path->cwnd = path->min_cwnd;
 	/* Re-entering slow start state. */
 	nr->state = QUIC_CC_ST_SS;
@@ -65,10 +65,10 @@ static void quic_cc_nr_slow_start(struct quic_cc *cc)
 /* Enter a recovery period. */
 static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);

-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	nr->recovery_start_time = now_ms;
 	nr->ssthresh = path->cwnd >> 1;
 	path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd);
@@ -78,12 +78,12 @@
 /* Slow start callback. */
 static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);

 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
 	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
 		path->cwnd += ev->ack.acked;
@@ -109,12 +109,12 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 /* Congestion avoidance callback. */
 static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);

 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
 	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
 	{
@@ -147,12 +147,12 @@
 /* Recovery period callback. */
 static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);

 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
 	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
 		/* RFC 9022 7.3.2. Recovery
@@ -182,10 +182,10 @@ }
 
 static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);

-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
 	              quic_cc_state_str(nr->state),
 	              (unsigned long long)path->cwnd,
diff --git a/src/quic_cc_nocc.c b/src/quic_cc_nocc.c
index b512a38c2..6e5cff96b 100644
--- a/src/quic_cc_nocc.c
+++ b/src/quic_cc_nocc.c
@@ -11,9 +11,9 @@
 static int quic_cc_nocc_init(struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;

-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	path->cwnd = path->max_cwnd;
 	return 1;
 }
@@ -48,9 +48,9 @@ static void quic_cc_nocc_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 
 static void quic_cc_nocc_state_trace(struct buffer *buf, const struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;

-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	chunk_appendf(buf, " cwnd=%llu", (unsigned long long)path->cwnd);
 }
diff --git a/src/quic_conn.c b/src/quic_conn.c
index 7f9837f82..14d9c9a2c 100644
--- a/src/quic_conn.c
+++ b/src/quic_conn.c
@@ -1118,8 +1118,8 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	qc->max_ack_delay = 0;
 	/* Only one path at this time (multipath not supported) */
 	qc->path = &qc->paths[0];
-	quic_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
-	               cc_algo ? cc_algo : default_quic_cc_algo, qc);
+	quic_cc_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
+	                  cc_algo ? cc_algo : default_quic_cc_algo, qc);
 	qc->stream_buf_count = 0;

 	memcpy(&qc->local_addr, local_addr, sizeof(qc->local_addr));
diff --git a/src/quic_tx.c b/src/quic_tx.c
index 49dfd6914..c1beabd7f 100644
--- a/src/quic_tx.c
+++ b/src/quic_tx.c
@@ -2092,7 +2092,7 @@ static int qc_build_frms(struct list *outlist, struct list *inlist,
 	 * control window.
 	 */
 	if (!qel->pktns->tx.pto_probe) {
-		size_t remain = quic_path_prep_data(qc->path);
+		size_t remain = quic_cc_path_prep_data(qc->path);

 		if (headlen > remain)
 			goto leave;
@@ -2443,7 +2443,7 @@ static int qc_do_build_pkt(unsigned char *pos, const unsigned char *end,
 	if (!probe && !LIST_ISEMPTY(frms) && !cc) {
 		size_t path_room;

-		path_room = quic_path_prep_data(qc->path);
+		path_room = quic_cc_path_prep_data(qc->path);
 		if (end - beg > path_room)
 			end = beg + path_room;
 	}
@@ -2840,7 +2840,7 @@ int qc_notify_send(struct quic_conn *qc)
 	 *
 	 * Probe packets MUST NOT be blocked by the congestion controller.
 	 */
-	if ((quic_path_prep_data(qc->path) || pktns->tx.pto_probe) &&
+	if ((quic_cc_path_prep_data(qc->path) || pktns->tx.pto_probe) &&
 	    (!qc_test_fd(qc) || !fd_send_active(qc->fd))) {
 		tasklet_wakeup(qc->subs->tasklet);
 		qc->subs->events &= ~SUB_RETRY_SEND;