MINOR: freq_ctr: unify freq_ctr and freq_ctr_period into freq_ctr
Both structures are identical except for the name of the field that starts the period and its description. Let's call them all freq_ctr and name the period's start "curr_tick", which is generic. This is only a temporary change; the fields are expected to remain the same, with no code change required (verified).
parent d209c87142
commit fa1258f02c
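
The commit relies on the two structures having exactly the same memory layout. A minimal standalone sketch of how that can be checked at compile time is shown below; the struct bodies are copied from the diff that follows, while the file name layout_check.c and the _Static_assert checks themselves are purely illustrative and not part of the commit.

/* layout_check.c -- illustrative only, not part of the commit.
 * Mock copies of the two structures as they exist before this commit;
 * C11 _Static_assert verifies they are interchangeable memory-wise.
 */
#include <stddef.h>

struct freq_ctr {                /* per-second form, first field named curr_sec */
	unsigned int curr_sec;   /* start date of current period */
	unsigned int curr_ctr;   /* cumulated value for current period */
	unsigned int prev_ctr;   /* value for last period */
};

struct freq_ctr_period {         /* per-period form, first field named curr_tick */
	unsigned int curr_tick;  /* start date of current period (wrapping ticks) */
	unsigned int curr_ctr;   /* cumulated value for current period */
	unsigned int prev_ctr;   /* value for last period */
};

_Static_assert(sizeof(struct freq_ctr) == sizeof(struct freq_ctr_period),
               "sizes must match for the rename to be a pure no-op");
_Static_assert(offsetof(struct freq_ctr, curr_ctr) == offsetof(struct freq_ctr_period, curr_ctr),
               "curr_ctr must sit at the same offset");
_Static_assert(offsetof(struct freq_ctr, prev_ctr) == offsetof(struct freq_ctr_period, prev_ctr),
               "prev_ctr must sit at the same offset");

int main(void) { return 0; }

Because the layouts match, re-typing every freq_ctr_period as freq_ctr is a mechanical rename, which is what the rest of the diff does.
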
@@ -214,7 +214,7 @@ enum {
 STD_T_SINT = 0, /* signed int */
 STD_T_UINT, /* unsigned int */
 STD_T_ULL, /* unsigned long long */
-STD_T_FRQP, /* freq_ctr_period structure made of three unsigned int */
+STD_T_FRQP, /* freq_ctr structure made of three unsigned int */
 };

 /* Prototypes */
@@ -57,7 +57,7 @@ struct activity {
 ALWAYS_ALIGN(64);

 struct freq_ctr cpust_1s; // avg amount of half-ms stolen over last second
-struct freq_ctr_period cpust_15s; // avg amount of half-ms stolen over last 15s
+struct freq_ctr cpust_15s; // avg amount of half-ms stolen over last 15s
 unsigned int avg_loop_us; // average run time per loop over last 1024 runs
 unsigned int accepted; // accepted incoming connections
 unsigned int accq_pushed; // accept queue connections pushed
@@ -48,7 +48,7 @@ static inline void appctx_init(struct appctx *appctx, unsigned long thread_mask)
 appctx->chunk = NULL;
 appctx->io_release = NULL;
 appctx->thread_mask = thread_mask;
-appctx->call_rate.curr_sec = 0;
+appctx->call_rate.curr_tick = 0;
 appctx->call_rate.curr_ctr = 0;
 appctx->call_rate.prev_ctr = 0;
 appctx->state = 0;
@@ -24,27 +24,17 @@

 #include <haproxy/api-t.h>

-/* The implicit freq_ctr counter counts a rate of events per second. It is the
- * preferred form to count rates over a one-second period, because it does not
- * involve any divide.
+/* The generic freq_ctr counter counts a rate of events per period, where the
+ * period has to be known by the user. The period is measured in ticks and
+ * must be at least 2 ticks long. This form is slightly more CPU intensive for
+ * reads than the per-second form as it involves a divide.
 */
 struct freq_ctr {
-unsigned int curr_sec; /* start date of current period (seconds from now.tv_sec) */
+unsigned int curr_tick; /* start date of current period (wrapping ticks) */
 unsigned int curr_ctr; /* cumulated value for current period */
 unsigned int prev_ctr; /* value for last period */
 };

-/* The generic freq_ctr_period counter counts a rate of events per period, where
- * the period has to be known by the user. The period is measured in ticks and
- * must be at least 2 ticks long. This form is slightly more CPU intensive than
- * the per-second form.
- */
-struct freq_ctr_period {
-unsigned int curr_tick; /* start date of current period (wrapping ticks) */
-unsigned int curr_ctr; /* cumulated value for current period */
-unsigned int prev_ctr; /* value for last period */
-};
-
 #endif /* _HAPROXY_FREQ_CTR_T_H */

 /*
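
The new comment above describes a counter that tracks events per caller-chosen period using only three fields. A simplified, single-threaded sketch of that rotation logic follows; it is illustrative only (all names are invented), since the real update_freq_ctr_period() shown in the next hunks works atomically and reserves the top bit of curr_tick as a lock.

/* Simplified, non-atomic illustration of the three-field rate counter.
 * The real HAProxy code uses _HA_ATOMIC_CAS and the 0x80000000 lock bit. */
struct freq_ctr_sketch {
	unsigned int curr_tick; /* start of the current period, in wrapping ms ticks */
	unsigned int curr_ctr;  /* events seen in the current period */
	unsigned int prev_ctr;  /* events seen in the previous period */
};

static unsigned int sketch_update(struct freq_ctr_sketch *ctr,
                                  unsigned int now_ms,
                                  unsigned int period,
                                  unsigned int inc)
{
	/* rotate when the current period has elapsed */
	if (now_ms - ctr->curr_tick >= period) {
		/* keep only the last full period; anything older is dropped */
		ctr->prev_ctr = (now_ms - ctr->curr_tick < 2 * period) ? ctr->curr_ctr : 0;
		ctr->curr_ctr = 0;
		ctr->curr_tick = now_ms; /* the real code realigns to period boundaries */
	}
	ctr->curr_ctr += inc;
	return ctr->curr_ctr;
}
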
@@ -28,7 +28,7 @@
 #include <haproxy/time.h>

 /* exported functions from freq_ctr.c */
-ullong freq_ctr_total(struct freq_ctr_period *ctr, uint period, int pend);
+ullong freq_ctr_total(struct freq_ctr *ctr, uint period, int pend);

 /* Update a frequency counter by <inc> incremental units. It is automatically
 * rotated if the period is over. It is important that it correctly initializes
@@ -49,7 +49,7 @@ static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int in
 * we operate, since timing variations would have resulted in the
 * same uncertainty as well.
 */
-curr_sec = ctr->curr_sec;
+curr_sec = ctr->curr_tick;
 do {
 now_tmp = global_now >> 32;
 if (curr_sec == (now_tmp & 0x7fffffff))
@@ -57,7 +57,7 @@ static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int in

 /* remove the bit, used for the lock */
 curr_sec &= 0x7fffffff;
-} while (!_HA_ATOMIC_CAS(&ctr->curr_sec, &curr_sec, curr_sec | 0x80000000));
+} while (!_HA_ATOMIC_CAS(&ctr->curr_tick, &curr_sec, curr_sec | 0x80000000));
 __ha_barrier_atomic_store();

 elapsed = (now_tmp & 0x7fffffff) - curr_sec;
@@ -72,7 +72,7 @@ static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int in
 }

 /* release the lock and update the time in case of rotate. */
-_HA_ATOMIC_STORE(&ctr->curr_sec, curr_sec & 0x7fffffff);
+_HA_ATOMIC_STORE(&ctr->curr_tick, curr_sec & 0x7fffffff);

 return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
 }
@@ -82,7 +82,7 @@ static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int in
 * a null area. This one works on frequency counters which have a period
 * different from one second.
 */
-static inline unsigned int update_freq_ctr_period(struct freq_ctr_period *ctr,
+static inline unsigned int update_freq_ctr_period(struct freq_ctr *ctr,
 unsigned int period, unsigned int inc)
 {
 unsigned int curr_tick;
@@ -135,7 +135,7 @@ unsigned int read_freq_ctr(struct freq_ctr *ctr);
 * instead which does not have the flapping correction, so that even frequencies
 * as low as one event/period are properly handled.
 */
-static inline uint read_freq_ctr_period(struct freq_ctr_period *ctr, uint period)
+static inline uint read_freq_ctr_period(struct freq_ctr *ctr, uint period)
 {
 ullong total = freq_ctr_total(ctr, period, -1);

@@ -152,7 +152,7 @@ unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned i
 * while respecting <freq> events per period, and taking into account that
 * <pend> events are already known to be pending. Returns 0 if limit was reached.
 */
-static inline uint freq_ctr_remain_period(struct freq_ctr_period *ctr, uint period, uint freq, uint pend)
+static inline uint freq_ctr_remain_period(struct freq_ctr *ctr, uint period, uint freq, uint pend)
 {
 ullong total = freq_ctr_total(ctr, period, pend);
 uint avg = div64_32(total, period);
@@ -176,7 +176,7 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
 * time, which will be rounded down 1ms for better accuracy, with a minimum
 * of one ms.
 */
-static inline uint next_event_delay_period(struct freq_ctr_period *ctr, uint period, uint freq, uint pend)
+static inline uint next_event_delay_period(struct freq_ctr *ctr, uint period, uint freq, uint pend)
 {
 ullong total = freq_ctr_total(ctr, period, pend);
 ullong limit = (ullong)freq * period;
@@ -196,7 +196,7 @@ static inline uint next_event_delay_period(struct freq_ctr_period *ctr, uint per
 }

 /* process freq counters over configurable periods */
-unsigned int read_freq_ctr_period(struct freq_ctr_period *ctr, unsigned int period);
+unsigned int read_freq_ctr_period(struct freq_ctr *ctr, unsigned int period);

 /* While the functions above report average event counts per period, we are
 * also interested in average values per event. For this we use a different
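
All of the period-based helpers above now take the unified struct freq_ctr plus an explicit period expressed in the same ticks as now_ms (milliseconds). A short usage sketch follows; it assumes compilation inside the HAProxy source tree, and the counter name req_rate and the 10-second period are invented for illustration.

/* Sketch only: assumes the HAProxy build context so that
 * <haproxy/freq_ctr.h> is available. Names below are invented. */
#include <haproxy/freq_ctr.h>

static struct freq_ctr req_rate;   /* one counter; the period is chosen by callers */

static void on_request(void)
{
	/* count one event against a 10s period (10000 ms ticks) */
	update_freq_ctr_period(&req_rate, 10000, 1);
}

static unsigned int requests_per_10s(void)
{
	/* average event count over the configured 10s period */
	return read_freq_ctr_period(&req_rate, 10000);
}
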
@@ -72,7 +72,7 @@ enum {
 STD_T_SINT = 0, /* data is of type signed int */
 STD_T_UINT, /* data is of type unsigned int */
 STD_T_ULL, /* data is of type unsigned long long */
-STD_T_FRQP, /* data is of type freq_ctr_period */
+STD_T_FRQP, /* data is of type freq_ctr */
 STD_T_DICT, /* data is of type key of dictionary entry */
 };

@@ -116,7 +116,7 @@ union stktable_data {
 int std_t_sint;
 unsigned int std_t_uint;
 unsigned long long std_t_ull;
-struct freq_ctr_period std_t_frqp;
+struct freq_ctr std_t_frqp;
 struct dict_entry *std_t_dict;

 /* types of each storable data */
@@ -124,24 +124,24 @@ union stktable_data {
 struct dict_entry *server_key;
 unsigned int gpt0;
 unsigned int gpc0;
-struct freq_ctr_period gpc0_rate;
+struct freq_ctr gpc0_rate;
 unsigned int gpc1;
-struct freq_ctr_period gpc1_rate;
+struct freq_ctr gpc1_rate;
 unsigned int conn_cnt;
-struct freq_ctr_period conn_rate;
+struct freq_ctr conn_rate;
 unsigned int conn_cur;
 unsigned int sess_cnt;
-struct freq_ctr_period sess_rate;
+struct freq_ctr sess_rate;
 unsigned int http_req_cnt;
-struct freq_ctr_period http_req_rate;
+struct freq_ctr http_req_rate;
 unsigned int http_err_cnt;
-struct freq_ctr_period http_err_rate;
+struct freq_ctr http_err_rate;
 unsigned long long bytes_in_cnt;
-struct freq_ctr_period bytes_in_rate;
+struct freq_ctr bytes_in_rate;
 unsigned long long bytes_out_cnt;
-struct freq_ctr_period bytes_out_rate;
+struct freq_ctr bytes_out_rate;
 unsigned int http_fail_cnt;
-struct freq_ctr_period http_fail_rate;
+struct freq_ctr http_fail_rate;
 };

 /* known data types */
@@ -79,7 +79,7 @@ static inline int stktable_type_size(int type)
 case STD_T_ULL:
 return sizeof(unsigned long long);
 case STD_T_FRQP:
-return sizeof(struct freq_ctr_period);
+return sizeof(struct freq_ctr);
 case STD_T_DICT:
 return sizeof(struct dict_entry *);
 }
@@ -37,7 +37,7 @@ unsigned int read_freq_ctr(struct freq_ctr *ctr)
 __ha_compiler_barrier();
 _past = ctr->prev_ctr;
 __ha_compiler_barrier();
-_curr_sec = ctr->curr_sec;
+_curr_sec = ctr->curr_tick;
 __ha_compiler_barrier();
 if (_curr_sec & 0x80000000)
 continue;
@@ -45,7 +45,7 @@ unsigned int read_freq_ctr(struct freq_ctr *ctr)
 __ha_compiler_barrier();
 past = ctr->prev_ctr;
 __ha_compiler_barrier();
-curr_sec = ctr->curr_sec;
+curr_sec = ctr->curr_tick;
 __ha_compiler_barrier();
 if (_curr == curr && _past == past && _curr_sec == curr_sec)
 break;
@@ -80,7 +80,7 @@ unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned i
 __ha_compiler_barrier();
 _past = ctr->prev_ctr;
 __ha_compiler_barrier();
-_curr_sec = ctr->curr_sec;
+_curr_sec = ctr->curr_tick;
 __ha_compiler_barrier();
 if (_curr_sec & 0x80000000)
 continue;
@@ -88,7 +88,7 @@ unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned i
 __ha_compiler_barrier();
 past = ctr->prev_ctr;
 __ha_compiler_barrier();
-curr_sec = ctr->curr_sec;
+curr_sec = ctr->curr_tick;
 __ha_compiler_barrier();
 if (_curr == curr && _past == past && _curr_sec == curr_sec)
 break;
@@ -127,7 +127,7 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
 __ha_compiler_barrier();
 _past = ctr->prev_ctr;
 __ha_compiler_barrier();
-_curr_sec = ctr->curr_sec;
+_curr_sec = ctr->curr_tick;
 __ha_compiler_barrier();
 if (_curr_sec & 0x80000000)
 continue;
@@ -135,7 +135,7 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
 __ha_compiler_barrier();
 past = ctr->prev_ctr;
 __ha_compiler_barrier();
-curr_sec = ctr->curr_sec;
+curr_sec = ctr->curr_tick;
 __ha_compiler_barrier();
 if (_curr == curr && _past == past && _curr_sec == curr_sec)
 break;
@@ -176,7 +176,7 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
 * read_freq_ctr_period() to avoid reporting ups and downs on low-frequency
 * events when the past value is <= 1.
 */
-ullong freq_ctr_total(struct freq_ctr_period *ctr, uint period, int pend)
+ullong freq_ctr_total(struct freq_ctr *ctr, uint period, int pend)
 {
 ullong curr, past;
 uint curr_tick;
@@ -713,7 +713,7 @@ static int peer_prepare_updatemsg(char *msg, size_t size, struct peer_prep_param
 break;
 }
 case STD_T_FRQP: {
-struct freq_ctr_period *frqp;
+struct freq_ctr *frqp;

 frqp = &stktable_data_cast(data_ptr, std_t_frqp);
 intencode((unsigned int)(now_ms - frqp->curr_tick), &cursor);
@@ -1667,11 +1667,11 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt,
 break;

 case STD_T_FRQP: {
-struct freq_ctr_period data;
+struct freq_ctr data;

-/* First bit is reserved for the freq_ctr_period lock
+/* First bit is reserved for the freq_ctr lock
 Note: here we're still protected by the stksess lock
-so we don't need to update the update the freq_ctr_period
+so we don't need to update the update the freq_ctr
 using its internal lock */

 data.curr_tick = tick_add(now_ms, -decoded_int) & ~0x1;
@@ -3627,7 +3627,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
 int data_type;
 int cur_arg;
 void *ptr;
-struct freq_ctr_period *frqp;
+struct freq_ctr *frqp;

 if (!*args[4])
 return cli_err(appctx, "Key value expected\n");
@@ -3764,9 +3764,9 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
 * push measures without having to update them too often.
 */
 frqp = &stktable_data_cast(ptr, std_t_frqp);
-/* First bit is reserved for the freq_ctr_period lock
+/* First bit is reserved for the freq_ctr lock
 Note: here we're still protected by the stksess lock
-so we don't need to update the update the freq_ctr_period
+so we don't need to update the update the freq_ctr
 using its internal lock */
 frqp->curr_tick = now_ms & ~0x1;
 frqp->prev_ctr = 0;
@@ -424,7 +424,7 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin, struct bu
 s->buffer_wait.target = s;
 s->buffer_wait.wakeup_cb = stream_buf_available;

-s->call_rate.curr_sec = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
+s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
 s->pcli_next_pid = 0;
 s->pcli_flags = 0;
 s->unique_id = IST_NULL;