Merge branch 'remove-qdisc-running-counter'
Sebastian Andrzej Siewior says:

====================
Try to simplify gnet_stats and remove the qdisc->running sequence counter.

The first few patches are a follow-up to
https://lore.kernel.org/all/20211007175000.2334713-1-bigeasy@linutronix.de/

The remaining patches (#5+) remove the seqcount_t (Qdisc::running) from the
Qdisc. The statistics (Qdisc::bstats and Qdisc::cpu_bstats) now use
u64_stats_t, and the "running" state is represented by a bit in Qdisc::state.
By removing the seqcount_t from the Qdisc and decoupling the bstats statistics
from it, the statistics can be queried even while the Qdisc is running,
instead of waiting until it is idle again.

The try-lock-like usage of the seqcount_t in qdisc_run_begin() is problematic
on PREEMPT_RT. Inside the qdisc_run_begin/end() qdisc->running sequence
counter write section, at sch_direct_xmit(), the seqcount write serialization
lock is released and then re-acquired. This is fine for !RT, because the
writer is in a BH-disabled region and there is no in-IRQ reader. For RT,
though, BH sections are preemptible. The earlier introduced seqcount_LOCKNAME_t
mechanism, in which an RT reader acquires and then releases the write
serialization lock to avoid spinning forever if it preempts a seqcount write
section, cannot work here: the qdisc->running write serialization lock is
already intermittently released inside the seqcount write section.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f8ba22a142
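The series below combines two mechanisms: the per-qdisc byte/packet counters move to u64_stats_t behind a u64_stats_sync sequence, and the "owner is running" indication becomes a plain bit in Qdisc::state instead of the Qdisc::running seqcount. The stand-alone C sketch that follows only models the shape of that scheme in user space, with illustrative names (basic_sync, fake_qdisc, run_begin/run_end); it is not the kernel implementation, which uses the u64_stats_* helpers and the test_bit()/clear_bit() calls visible in the diff.

```c
/* User-space sketch of the scheme (illustrative names only, not kernel code):
 * a sequence counter guards the 64-bit counters so readers can sample them
 * while a writer is active, and a single "running" bit replaces the old
 * per-qdisc seqcount used by qdisc_run_begin()/qdisc_run_end().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct basic_sync {
	uint64_t bytes;
	uint64_t packets;
	atomic_uint seq;	/* even: no writer, odd: writer in progress */
};

struct fake_qdisc {
	struct basic_sync bstats;
	atomic_ulong state;	/* bit 0 models __QDISC_STATE_RUNNING */
};

/* Writer side: roughly what _bstats_update() does between
 * u64_stats_update_begin() and u64_stats_update_end(). */
static void bstats_update(struct basic_sync *b, uint64_t bytes, uint32_t packets)
{
	atomic_fetch_add(&b->seq, 1);	/* enter write section (seq becomes odd) */
	b->bytes += bytes;
	b->packets += packets;
	atomic_fetch_add(&b->seq, 1);	/* leave write section (seq even again) */
}

/* Reader side: retry while a writer is, or was, inside the section.  A real
 * implementation must also prevent torn 64-bit reads; the kernel handles
 * that with u64_stats_sync/local64_t, which this sketch glosses over. */
static void bstats_read(struct basic_sync *b, uint64_t *bytes, uint64_t *packets)
{
	unsigned int start;

	do {
		start = atomic_load(&b->seq);
		*bytes = b->bytes;
		*packets = b->packets;
	} while ((start & 1) || atomic_load(&b->seq) != start);
}

/* The "running" state: a test-and-set bit instead of bumping a seqcount. */
static bool run_begin(struct fake_qdisc *q)
{
	return !(atomic_fetch_or(&q->state, 1UL) & 1UL);	/* true if we own it */
}

static void run_end(struct fake_qdisc *q)
{
	atomic_fetch_and(&q->state, ~1UL);	/* drop the running bit */
}

int main(void)
{
	struct fake_qdisc q = { 0 };
	uint64_t bytes, packets;

	if (run_begin(&q)) {		/* "dequeue/transmit" owner path */
		bstats_update(&q.bstats, 1500, 1);
		run_end(&q);
	}
	bstats_read(&q.bstats, &bytes, &packets);	/* stats dump path */
	printf("%llu bytes / %llu packets\n",
	       (unsigned long long)bytes, (unsigned long long)packets);
	return 0;
}
```

In the kernel the same pattern appears as u64_stats_update_begin()/u64_stats_add() on the writer side, a fetch/retry loop on the reader side, and __QDISC_STATE_RUNNING manipulated with test-and-set/clear_bit(); per-CPU bstats (Qdisc::cpu_bstats) avoid writer contention entirely.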
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
 static void
 nfp_abm_stats_calculate(struct nfp_alink_stats *new,
 struct nfp_alink_stats *old,
-struct gnet_stats_basic_packed *bstats,
+struct gnet_stats_basic_sync *bstats,
 struct gnet_stats_queue *qstats)
 {
 _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
@@ -1916,7 +1916,6 @@ enum netdev_ml_priv_type {
 * @sfp_bus: attached &struct sfp_bus structure.
 *
 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
-* @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
 *
 * @proto_down: protocol port state information can be sent to the
 * switch driver and used to set the phys state of the
@@ -2250,7 +2249,6 @@ struct net_device {
 struct phy_device *phydev;
 struct sfp_bus *sfp_bus;
 struct lock_class_key *qdisc_tx_busylock;
-struct lock_class_key *qdisc_running_key;
 bool proto_down;
 unsigned wol_enabled:1;
 unsigned threaded:1;
@@ -2360,13 +2358,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 #define netdev_lockdep_set_classes(dev) \
 { \
 static struct lock_class_key qdisc_tx_busylock_key; \
-static struct lock_class_key qdisc_running_key; \
 static struct lock_class_key qdisc_xmit_lock_key; \
 static struct lock_class_key dev_addr_list_lock_key; \
 unsigned int i; \
 \
 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
-(dev)->qdisc_running_key = &qdisc_running_key; \
 lockdep_set_class(&(dev)->addr_list_lock, \
 &dev_addr_list_lock_key); \
 for (i = 0; i < (dev)->num_tx_queues; i++) \
@@ -83,6 +83,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
 return local64_read(&p->v);
 }

+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+local64_set(&p->v, val);
+}
+
 static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
 {
 local64_add(val, &p->v);
@@ -104,6 +109,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
 return p->v;
 }

+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+p->v = val;
+}
+
 static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
 {
 p->v += val;
@@ -30,13 +30,13 @@ struct tc_action {
 atomic_t tcfa_bindcnt;
 int tcfa_action;
 struct tcf_t tcfa_tm;
-struct gnet_stats_basic_packed tcfa_bstats;
-struct gnet_stats_basic_packed tcfa_bstats_hw;
+struct gnet_stats_basic_sync tcfa_bstats;
+struct gnet_stats_basic_sync tcfa_bstats_hw;
 struct gnet_stats_queue tcfa_qstats;
 struct net_rate_estimator __rcu *tcfa_rate_est;
 spinlock_t tcfa_lock;
-struct gnet_stats_basic_cpu __percpu *cpu_bstats;
-struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+struct gnet_stats_basic_sync __percpu *cpu_bstats;
+struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
 struct gnet_stats_queue __percpu *cpu_qstats;
 struct tc_cookie __rcu *act_cookie;
 struct tcf_chain __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
 struct sk_buff *skb)
 {
 if (likely(a->cpu_bstats)) {
-bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
 return;
 }
 spin_lock(&a->tcfa_lock);
@@ -7,14 +7,17 @@
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>

-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
-__u64 bytes;
-__u64 packets;
-};
-
-struct gnet_stats_basic_cpu {
-struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+* Must be initialized beforehand with gnet_stats_basic_sync_init().
+*
+* If no reads can ever occur parallel to writes (e.g. stack-allocated
+* bstats), then the internal stat values can be written to and read
+* from directly. Otherwise, use _bstats_set/update() for writes and
+* gnet_stats_add_basic() for reads.
+*/
+struct gnet_stats_basic_sync {
+u64_stats_t bytes;
+u64_stats_t packets;
 struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));

@@ -34,6 +37,7 @@ struct gnet_dump {
 struct tc_stats tc_stats;
 };

+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
 struct gnet_dump *d, int padattr);

@@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 spinlock_t *lock, struct gnet_dump *d,
 int padattr);

-int gnet_stats_copy_basic(const seqcount_t *running,
-struct gnet_dump *d,
-struct gnet_stats_basic_cpu __percpu *cpu,
-struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
-struct gnet_stats_basic_packed *bstats,
-struct gnet_stats_basic_cpu __percpu *cpu,
-struct gnet_stats_basic_packed *b);
-int gnet_stats_copy_basic_hw(const seqcount_t *running,
-struct gnet_dump *d,
-struct gnet_stats_basic_cpu __percpu *cpu,
-struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+struct gnet_stats_basic_sync __percpu *cpu,
+struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+struct gnet_stats_basic_sync __percpu *cpu,
+struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+struct gnet_stats_basic_sync __percpu *cpu,
+struct gnet_stats_basic_sync *b, bool running);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
 struct net_rate_estimator __rcu **ptr);
 int gnet_stats_copy_queue(struct gnet_dump *d,
 struct gnet_stats_queue __percpu *cpu_q,
 struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
-const struct gnet_stats_queue __percpu *cpu_q,
-const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+const struct gnet_stats_queue __percpu *cpu_q,
+const struct gnet_stats_queue *q);
 int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

 int gnet_stats_finish_copy(struct gnet_dump *d);

-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+struct gnet_stats_basic_sync __percpu *cpu_bstats,
 struct net_rate_estimator __rcu **rate_est,
 spinlock_t *lock,
-seqcount_t *running, struct nlattr *opt);
+bool running, struct nlattr *opt);
 void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+struct gnet_stats_basic_sync __percpu *cpu_bstats,
 struct net_rate_estimator __rcu **ptr,
 spinlock_t *lock,
-seqcount_t *running, struct nlattr *opt);
+bool running, struct nlattr *opt);
 bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
 bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
 struct gnet_stats_rate_est64 *sample);
@@ -6,7 +6,7 @@

 struct xt_rateest {
 /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
-struct gnet_stats_basic_packed bstats;
+struct gnet_stats_basic_sync bstats;
 spinlock_t lock;

@@ -765,7 +765,7 @@ struct tc_cookie {
 };

 struct tc_qopt_offload_stats {
-struct gnet_stats_basic_packed *bstats;
+struct gnet_stats_basic_sync *bstats;
 struct gnet_stats_queue *qstats;
 };

@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
 };

 struct tc_gred_qopt_offload_stats {
-struct gnet_stats_basic_packed bstats[MAX_DPs];
+struct gnet_stats_basic_sync bstats[MAX_DPs];
 struct gnet_stats_queue qstats[MAX_DPs];
 struct red_stats *xstats[MAX_DPs];
 };
@@ -38,6 +38,10 @@ enum qdisc_state_t {
 __QDISC_STATE_DEACTIVATED,
 __QDISC_STATE_MISSED,
 __QDISC_STATE_DRAINING,
+/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+* Use qdisc_run_begin/end() or qdisc_is_running() instead.
+*/
+__QDISC_STATE_RUNNING,
 };

 #define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
@@ -97,7 +101,7 @@ struct Qdisc {
 struct netdev_queue *dev_queue;

 struct net_rate_estimator __rcu *rate_est;
-struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+struct gnet_stats_basic_sync __percpu *cpu_bstats;
 struct gnet_stats_queue __percpu *cpu_qstats;
 int pad;
 refcount_t refcnt;
@@ -107,8 +111,7 @@ struct Qdisc {
 */
 struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
 struct qdisc_skb_head q;
-struct gnet_stats_basic_packed bstats;
-seqcount_t running;
+struct gnet_stats_basic_sync bstats;
 struct gnet_stats_queue qstats;
 unsigned long state;
 struct Qdisc *next_sched;
@@ -143,11 +146,15 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
 return NULL;
 }

+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+* root_lock section, or provide their own memory barriers -- ordering
+* against qdisc_run_begin/end() atomic bit operations.
+*/
 static inline bool qdisc_is_running(struct Qdisc *qdisc)
 {
 if (qdisc->flags & TCQ_F_NOLOCK)
 return spin_is_locked(&qdisc->seqlock);
-return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+return test_bit(__QDISC_STATE_RUNNING, &qdisc->state);
 }

 static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
@@ -167,6 +174,9 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 return !READ_ONCE(qdisc->q.qlen);
 }

+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+* the qdisc root lock acquired.
+*/
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
 if (qdisc->flags & TCQ_F_NOLOCK) {
@@ -206,15 +216,8 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 * after it releases the lock at the end of qdisc_run_end().
 */
 return spin_trylock(&qdisc->seqlock);
-} else if (qdisc_is_running(qdisc)) {
-return false;
-}
-/* Variant of write_seqcount_begin() telling lockdep a trylock
-* was attempted.
-*/
-raw_write_seqcount_begin(&qdisc->running);
-seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
-return true;
+return test_and_set_bit(__QDISC_STATE_RUNNING, &qdisc->state);
 }

 static inline void qdisc_run_end(struct Qdisc *qdisc)
@@ -226,7 +229,7 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
 &qdisc->state)))
 __netif_schedule(qdisc);
 } else {
-write_seqcount_end(&qdisc->running);
+clear_bit(__QDISC_STATE_RUNNING, &qdisc->state);
 }
 }

@@ -592,14 +595,6 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
 return qdisc_lock(root);
 }

-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
-{
-struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
-ASSERT_RTNL();
-return &root->running;
-}
-
 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
 {
 return qdisc->dev_queue->dev;
@@ -849,14 +844,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 return sch->enqueue(skb, sch, to_free);
 }

-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
 __u64 bytes, __u32 packets)
 {
-bstats->bytes += bytes;
-bstats->packets += packets;
+u64_stats_update_begin(&bstats->syncp);
+u64_stats_add(&bstats->bytes, bytes);
+u64_stats_add(&bstats->packets, packets);
+u64_stats_update_end(&bstats->syncp);
 }

-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
 const struct sk_buff *skb)
 {
 _bstats_update(bstats,
@@ -864,26 +861,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
 skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
 }

-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
-__u64 bytes, __u32 packets)
-{
-u64_stats_update_begin(&bstats->syncp);
-_bstats_update(&bstats->bstats, bytes, packets);
-u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
-const struct sk_buff *skb)
-{
-u64_stats_update_begin(&bstats->syncp);
-bstats_update(&bstats->bstats, skb);
-u64_stats_update_end(&bstats->syncp);
-}
-
 static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
 const struct sk_buff *skb)
 {
-bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
 }

 static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -972,10 +953,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
 __u32 *backlog)
 {
 struct gnet_stats_queue qstats = { 0 };
-__u32 len = qdisc_qlen_sum(sch);

-__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
-*qlen = qstats.qlen;
+gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+*qlen = qstats.qlen + qdisc_qlen(sch);
 *backlog = qstats.backlog;
 }

@@ -1316,7 +1296,7 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
 struct mini_Qdisc {
 struct tcf_proto *filter_list;
 struct tcf_block *block;
-struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+struct gnet_stats_basic_sync __percpu *cpu_bstats;
 struct gnet_stats_queue __percpu *cpu_qstats;
 struct rcu_head rcu;
 };
@@ -1324,7 +1304,7 @@ struct mini_Qdisc {
 static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
 const struct sk_buff *skb)
 {
-bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
 }

 static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
@@ -40,10 +40,10 @@
 */

 struct net_rate_estimator {
-struct gnet_stats_basic_packed *bstats;
+struct gnet_stats_basic_sync *bstats;
 spinlock_t *stats_lock;
-seqcount_t *running;
-struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+bool running;
+struct gnet_stats_basic_sync __percpu *cpu_bstats;
 u8 ewma_log;
 u8 intvl_log; /* period : (250ms << intvl_log) */

@@ -60,13 +60,13 @@ struct net_rate_estimator {
 };

 static void est_fetch_counters(struct net_rate_estimator *e,
-struct gnet_stats_basic_packed *b)
+struct gnet_stats_basic_sync *b)
 {
-memset(b, 0, sizeof(*b));
+gnet_stats_basic_sync_init(b);
 if (e->stats_lock)
 spin_lock(e->stats_lock);

-__gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
+gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running);

 if (e->stats_lock)
 spin_unlock(e->stats_lock);
@@ -76,14 +76,18 @@ static void est_fetch_counters(struct net_rate_estimator *e,
 static void est_timer(struct timer_list *t)
 {
 struct net_rate_estimator *est = from_timer(est, t, timer);
-struct gnet_stats_basic_packed b;
+struct gnet_stats_basic_sync b;
+u64 b_bytes, b_packets;
 u64 rate, brate;

 est_fetch_counters(est, &b);
-brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+b_bytes = u64_stats_read(&b.bytes);
+b_packets = u64_stats_read(&b.packets);
+
+brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
 brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);

-rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
 rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);

 write_seqcount_begin(&est->seq);
@@ -91,8 +95,8 @@ static void est_timer(struct timer_list *t)
 est->avpps += rate;
 write_seqcount_end(&est->seq);

-est->last_bytes = b.bytes;
-est->last_packets = b.packets;
+est->last_bytes = b_bytes;
+est->last_packets = b_packets;

 est->next_jiffies += ((HZ/4) << est->intvl_log);

@@ -109,7 +113,9 @@ static void est_timer(struct timer_list *t)
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
-* @running: qdisc running seqcount
+* @running: true if @bstats represents a running qdisc, thus @bstats'
+* internal values might change during basic reads. Only used
+* if @bstats_cpu is NULL
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
@@ -121,16 +127,16 @@ static void est_timer(struct timer_list *t)
 * Returns 0 on success or a negative error code.
 *
 */
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+struct gnet_stats_basic_sync __percpu *cpu_bstats,
 struct net_rate_estimator __rcu **rate_est,
 spinlock_t *lock,
-seqcount_t *running,
+bool running,
 struct nlattr *opt)
 {
 struct gnet_estimator *parm = nla_data(opt);
 struct net_rate_estimator *old, *est;
-struct gnet_stats_basic_packed b;
+struct gnet_stats_basic_sync b;
 int intvl_log;

 if (nla_len(opt) < sizeof(*parm))
@@ -164,8 +170,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 est_fetch_counters(est, &b);
 if (lock)
 local_bh_enable();
-est->last_bytes = b.bytes;
-est->last_packets = b.packets;
+est->last_bytes = u64_stats_read(&b.bytes);
+est->last_packets = u64_stats_read(&b.packets);

 if (lock)
 spin_lock_bh(lock);
@@ -214,7 +220,9 @@ EXPORT_SYMBOL(gen_kill_estimator);
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
-* @running: qdisc running seqcount (might be NULL)
+* @running: true if @bstats represents a running qdisc, thus @bstats'
+* internal values might change during basic reads. Only used
+* if @cpu_bstats is NULL
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
@@ -222,11 +230,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
 *
 * Returns 0 on success or a negative error code.
 */
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+struct gnet_stats_basic_sync __percpu *cpu_bstats,
 struct net_rate_estimator __rcu **ptr,
 spinlock_t *lock,
-seqcount_t *running, struct nlattr *opt)
+bool running, struct nlattr *opt)
 {
 return gen_new_estimator(bstats, cpu_bstats, rate_est,
 lock, running, opt);
@ -18,7 +18,7 @@
|
||||
#include <linux/gen_stats.h>
|
||||
#include <net/netlink.h>
|
||||
#include <net/gen_stats.h>
|
||||
|
||||
#include <net/sch_generic.h>
|
||||
|
||||
static inline int
|
||||
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
|
||||
@ -114,63 +114,81 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
|
||||
}
|
||||
EXPORT_SYMBOL(gnet_stats_start_copy);
|
||||
|
||||
static void
|
||||
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu)
|
||||
/* Must not be inlined, due to u64_stats seqcount_t lockdep key */
|
||||
void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
|
||||
{
|
||||
u64_stats_set(&b->bytes, 0);
|
||||
u64_stats_set(&b->packets, 0);
|
||||
u64_stats_init(&b->syncp);
|
||||
}
|
||||
EXPORT_SYMBOL(gnet_stats_basic_sync_init);
|
||||
|
||||
static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
|
||||
struct gnet_stats_basic_sync __percpu *cpu)
|
||||
{
|
||||
u64 t_bytes = 0, t_packets = 0;
|
||||
int i;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
|
||||
struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
|
||||
unsigned int start;
|
||||
u64 bytes, packets;
|
||||
|
||||
do {
|
||||
start = u64_stats_fetch_begin_irq(&bcpu->syncp);
|
||||
bytes = bcpu->bstats.bytes;
|
||||
packets = bcpu->bstats.packets;
|
||||
bytes = u64_stats_read(&bcpu->bytes);
|
||||
packets = u64_stats_read(&bcpu->packets);
|
||||
} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
|
||||
|
||||
bstats->bytes += bytes;
|
||||
bstats->packets += packets;
|
||||
t_bytes += bytes;
|
||||
t_packets += packets;
|
||||
}
|
||||
_bstats_update(bstats, t_bytes, t_packets);
|
||||
}
|
||||
|
||||
void
|
||||
__gnet_stats_copy_basic(const seqcount_t *running,
|
||||
struct gnet_stats_basic_packed *bstats,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu,
|
||||
struct gnet_stats_basic_packed *b)
|
||||
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
|
||||
struct gnet_stats_basic_sync __percpu *cpu,
|
||||
struct gnet_stats_basic_sync *b, bool running)
|
||||
{
|
||||
unsigned int seq;
|
||||
unsigned int start;
|
||||
u64 bytes = 0;
|
||||
u64 packets = 0;
|
||||
|
||||
WARN_ON_ONCE((cpu || running) && !in_task());
|
||||
|
||||
if (cpu) {
|
||||
__gnet_stats_copy_basic_cpu(bstats, cpu);
|
||||
gnet_stats_add_basic_cpu(bstats, cpu);
|
||||
return;
|
||||
}
|
||||
do {
|
||||
if (running)
|
||||
seq = read_seqcount_begin(running);
|
||||
bstats->bytes = b->bytes;
|
||||
bstats->packets = b->packets;
|
||||
} while (running && read_seqcount_retry(running, seq));
|
||||
start = u64_stats_fetch_begin_irq(&b->syncp);
|
||||
bytes = u64_stats_read(&b->bytes);
|
||||
packets = u64_stats_read(&b->packets);
|
||||
} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
|
||||
|
||||
_bstats_update(bstats, bytes, packets);
|
||||
}
|
||||
EXPORT_SYMBOL(__gnet_stats_copy_basic);
|
||||
EXPORT_SYMBOL(gnet_stats_add_basic);
|
||||
|
||||
static int
|
||||
___gnet_stats_copy_basic(const seqcount_t *running,
|
||||
struct gnet_dump *d,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu,
|
||||
struct gnet_stats_basic_packed *b,
|
||||
int type)
|
||||
___gnet_stats_copy_basic(struct gnet_dump *d,
|
||||
struct gnet_stats_basic_sync __percpu *cpu,
|
||||
struct gnet_stats_basic_sync *b,
|
||||
int type, bool running)
|
||||
{
|
||||
struct gnet_stats_basic_packed bstats = {0};
|
||||
struct gnet_stats_basic_sync bstats;
|
||||
u64 bstats_bytes, bstats_packets;
|
||||
|
||||
__gnet_stats_copy_basic(running, &bstats, cpu, b);
|
||||
gnet_stats_basic_sync_init(&bstats);
|
||||
gnet_stats_add_basic(&bstats, cpu, b, running);
|
||||
|
||||
bstats_bytes = u64_stats_read(&bstats.bytes);
|
||||
bstats_packets = u64_stats_read(&bstats.packets);
|
||||
|
||||
if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
|
||||
d->tc_stats.bytes = bstats.bytes;
|
||||
d->tc_stats.packets = bstats.packets;
|
||||
d->tc_stats.bytes = bstats_bytes;
|
||||
d->tc_stats.packets = bstats_packets;
|
||||
}
|
||||
|
||||
if (d->tail) {
|
||||
@ -178,24 +196,28 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
|
||||
int res;
|
||||
|
||||
memset(&sb, 0, sizeof(sb));
|
||||
sb.bytes = bstats.bytes;
|
||||
sb.packets = bstats.packets;
|
||||
sb.bytes = bstats_bytes;
|
||||
sb.packets = bstats_packets;
|
||||
res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
|
||||
if (res < 0 || sb.packets == bstats.packets)
|
||||
if (res < 0 || sb.packets == bstats_packets)
|
||||
return res;
|
||||
/* emit 64bit stats only if needed */
|
||||
return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets,
|
||||
sizeof(bstats.packets), TCA_STATS_PAD);
|
||||
return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
|
||||
sizeof(bstats_packets), TCA_STATS_PAD);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gnet_stats_copy_basic - copy basic statistics into statistic TLV
|
||||
* @running: seqcount_t pointer
|
||||
* @d: dumping handle
|
||||
* @cpu: copy statistic per cpu
|
||||
* @b: basic statistics
|
||||
* @running: true if @b represents a running qdisc, thus @b's
|
||||
* internal values might change during basic reads.
|
||||
* Only used if @cpu is NULL
|
||||
*
|
||||
* Context: task; must not be run from IRQ or BH contexts
|
||||
*
|
||||
* Appends the basic statistics to the top level TLV created by
|
||||
* gnet_stats_start_copy().
|
||||
@ -204,22 +226,25 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
|
||||
* if the room in the socket buffer was not sufficient.
|
||||
*/
|
||||
int
|
||||
gnet_stats_copy_basic(const seqcount_t *running,
|
||||
struct gnet_dump *d,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu,
|
||||
struct gnet_stats_basic_packed *b)
|
||||
gnet_stats_copy_basic(struct gnet_dump *d,
|
||||
struct gnet_stats_basic_sync __percpu *cpu,
|
||||
struct gnet_stats_basic_sync *b,
|
||||
bool running)
|
||||
{
|
||||
return ___gnet_stats_copy_basic(running, d, cpu, b,
|
||||
TCA_STATS_BASIC);
|
||||
return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
|
||||
}
|
||||
EXPORT_SYMBOL(gnet_stats_copy_basic);
|
||||
|
||||
/**
|
||||
* gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
|
||||
* @running: seqcount_t pointer
|
||||
* @d: dumping handle
|
||||
* @cpu: copy statistic per cpu
|
||||
* @b: basic statistics
|
||||
* @running: true if @b represents a running qdisc, thus @b's
|
||||
* internal values might change during basic reads.
|
||||
* Only used if @cpu is NULL
|
||||
*
|
||||
* Context: task; must not be run from IRQ or BH contexts
|
||||
*
|
||||
* Appends the basic statistics to the top level TLV created by
|
||||
* gnet_stats_start_copy().
|
||||
@ -228,13 +253,12 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
|
||||
* if the room in the socket buffer was not sufficient.
|
||||
*/
|
||||
int
|
||||
gnet_stats_copy_basic_hw(const seqcount_t *running,
|
||||
struct gnet_dump *d,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu,
|
||||
struct gnet_stats_basic_packed *b)
|
||||
gnet_stats_copy_basic_hw(struct gnet_dump *d,
|
||||
struct gnet_stats_basic_sync __percpu *cpu,
|
||||
struct gnet_stats_basic_sync *b,
|
||||
bool running)
|
||||
{
|
||||
return ___gnet_stats_copy_basic(running, d, cpu, b,
|
||||
TCA_STATS_BASIC_HW);
|
||||
return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
|
||||
}
|
||||
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
|
||||
|
||||
@ -282,16 +306,15 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
|
||||
}
|
||||
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
|
||||
|
||||
static void
|
||||
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
|
||||
const struct gnet_stats_queue __percpu *q)
|
||||
static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
|
||||
const struct gnet_stats_queue __percpu *q)
|
||||
{
|
||||
int i;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
|
||||
|
||||
qstats->qlen = 0;
|
||||
qstats->qlen += qcpu->backlog;
|
||||
qstats->backlog += qcpu->backlog;
|
||||
qstats->drops += qcpu->drops;
|
||||
qstats->requeues += qcpu->requeues;
|
||||
@ -299,24 +322,21 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
|
||||
}
|
||||
}
|
||||
|
||||
void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
|
||||
const struct gnet_stats_queue __percpu *cpu,
|
||||
const struct gnet_stats_queue *q,
|
||||
__u32 qlen)
|
||||
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
|
||||
const struct gnet_stats_queue __percpu *cpu,
|
||||
const struct gnet_stats_queue *q)
|
||||
{
|
||||
if (cpu) {
|
||||
__gnet_stats_copy_queue_cpu(qstats, cpu);
|
||||
gnet_stats_add_queue_cpu(qstats, cpu);
|
||||
} else {
|
||||
qstats->qlen = q->qlen;
|
||||
qstats->backlog = q->backlog;
|
||||
qstats->drops = q->drops;
|
||||
qstats->requeues = q->requeues;
|
||||
qstats->overlimits = q->overlimits;
|
||||
qstats->qlen += q->qlen;
|
||||
qstats->backlog += q->backlog;
|
||||
qstats->drops += q->drops;
|
||||
qstats->requeues += q->requeues;
|
||||
qstats->overlimits += q->overlimits;
|
||||
}
|
||||
|
||||
qstats->qlen = qlen;
|
||||
}
|
||||
EXPORT_SYMBOL(__gnet_stats_copy_queue);
|
||||
EXPORT_SYMBOL(gnet_stats_add_queue);
|
||||
|
||||
/**
|
||||
* gnet_stats_copy_queue - copy queue statistics into statistics TLV
|
||||
@ -339,7 +359,8 @@ gnet_stats_copy_queue(struct gnet_dump *d,
|
||||
{
|
||||
struct gnet_stats_queue qstats = {0};
|
||||
|
||||
__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
|
||||
gnet_stats_add_queue(&qstats, cpu_q, q);
|
||||
qstats.qlen = qlen;
|
||||
|
||||
if (d->compat_tc_stats) {
|
||||
d->tc_stats.drops = qstats.drops;
|
||||
|
@ -94,11 +94,11 @@ static unsigned int
|
||||
xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct xt_rateest_target_info *info = par->targinfo;
|
||||
struct gnet_stats_basic_packed *stats = &info->est->bstats;
|
||||
struct gnet_stats_basic_sync *stats = &info->est->bstats;
|
||||
|
||||
spin_lock_bh(&info->est->lock);
|
||||
stats->bytes += skb->len;
|
||||
stats->packets++;
|
||||
u64_stats_add(&stats->bytes, skb->len);
|
||||
u64_stats_inc(&stats->packets);
|
||||
spin_unlock_bh(&info->est->lock);
|
||||
|
||||
return XT_CONTINUE;
|
||||
@ -143,6 +143,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
|
||||
if (!est)
|
||||
goto err1;
|
||||
|
||||
gnet_stats_basic_sync_init(&est->bstats);
|
||||
strlcpy(est->name, info->name, sizeof(est->name));
|
||||
spin_lock_init(&est->lock);
|
||||
est->refcnt = 1;
|
||||
|
@ -480,16 +480,18 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
|
||||
atomic_set(&p->tcfa_bindcnt, 1);
|
||||
|
||||
if (cpustats) {
|
||||
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
|
||||
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
|
||||
if (!p->cpu_bstats)
|
||||
goto err1;
|
||||
p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
|
||||
p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
|
||||
if (!p->cpu_bstats_hw)
|
||||
goto err2;
|
||||
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
|
||||
if (!p->cpu_qstats)
|
||||
goto err3;
|
||||
}
|
||||
gnet_stats_basic_sync_init(&p->tcfa_bstats);
|
||||
gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
|
||||
spin_lock_init(&p->tcfa_lock);
|
||||
p->tcfa_index = index;
|
||||
p->tcfa_tm.install = jiffies;
|
||||
@ -499,7 +501,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
|
||||
if (est) {
|
||||
err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
|
||||
&p->tcfa_rate_est,
|
||||
&p->tcfa_lock, NULL, est);
|
||||
&p->tcfa_lock, false, est);
|
||||
if (err)
|
||||
goto err4;
|
||||
}
|
||||
@ -1126,13 +1128,13 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
|
||||
u64 drops, bool hw)
|
||||
{
|
||||
if (a->cpu_bstats) {
|
||||
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
|
||||
_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
|
||||
|
||||
this_cpu_ptr(a->cpu_qstats)->drops += drops;
|
||||
|
||||
if (hw)
|
||||
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
|
||||
bytes, packets);
|
||||
_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
|
||||
bytes, packets);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1171,9 +1173,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
|
||||
if (err < 0)
|
||||
goto errout;
|
||||
|
||||
if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
|
||||
gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
|
||||
&p->tcfa_bstats_hw) < 0 ||
|
||||
if (gnet_stats_copy_basic(&d, p->cpu_bstats,
|
||||
&p->tcfa_bstats, false) < 0 ||
|
||||
gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
|
||||
&p->tcfa_bstats_hw, false) < 0 ||
|
||||
gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
|
||||
gnet_stats_copy_queue(&d, p->cpu_qstats,
|
||||
&p->tcfa_qstats,
|
||||
|
@ -41,7 +41,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
|
||||
int action, filter_res;
|
||||
|
||||
tcf_lastuse_update(&prog->tcf_tm);
|
||||
bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
|
||||
|
||||
filter = rcu_dereference(prog->filter);
|
||||
if (at_ingress) {
|
||||
|
@ -718,7 +718,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
|
||||
u8 *tlv_data;
|
||||
u16 metalen;
|
||||
|
||||
bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
|
||||
tcf_lastuse_update(&ife->tcf_tm);
|
||||
|
||||
if (skb_at_tc_ingress(skb))
|
||||
@ -806,7 +806,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
|
||||
exceed_mtu = true;
|
||||
}
|
||||
|
||||
bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
|
||||
tcf_lastuse_update(&ife->tcf_tm);
|
||||
|
||||
if (!metalen) { /* no metadata to send */
|
||||
|
@ -59,7 +59,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
int ret, mac_len;
|
||||
|
||||
tcf_lastuse_update(&m->tcf_tm);
|
||||
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb);
|
||||
|
||||
/* Ensure 'data' points at mac_header prior calling mpls manipulating
|
||||
* functions.
|
||||
|
@ -125,7 +125,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
|
||||
police->common.cpu_bstats,
|
||||
&police->tcf_rate_est,
|
||||
&police->tcf_lock,
|
||||
NULL, est);
|
||||
false, est);
|
||||
if (err)
|
||||
goto failure;
|
||||
} else if (tb[TCA_POLICE_AVRATE] &&
|
||||
@ -248,7 +248,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
int ret;
|
||||
|
||||
tcf_lastuse_update(&police->tcf_tm);
|
||||
bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);
|
||||
|
||||
ret = READ_ONCE(police->tcf_action);
|
||||
p = rcu_dereference_bh(police->params);
|
||||
|
@ -163,7 +163,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
int retval;
|
||||
|
||||
tcf_lastuse_update(&s->tcf_tm);
|
||||
bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb);
|
||||
retval = READ_ONCE(s->tcf_action);
|
||||
|
||||
psample_group = rcu_dereference_bh(s->psample_group);
|
||||
|
@ -36,7 +36,8 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
* then it would look like "hello_3" (without quotes)
|
||||
*/
|
||||
pr_info("simple: %s_%llu\n",
|
||||
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
|
||||
(char *)d->tcfd_defdata,
|
||||
u64_stats_read(&d->tcf_bstats.packets));
|
||||
spin_unlock(&d->tcf_lock);
|
||||
return d->tcf_action;
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
int action;
|
||||
|
||||
tcf_lastuse_update(&d->tcf_tm);
|
||||
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
|
||||
|
||||
params = rcu_dereference_bh(d->params);
|
||||
action = READ_ONCE(d->tcf_action);
|
||||
|
@ -31,7 +31,7 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
u64 flags;
|
||||
|
||||
tcf_lastuse_update(&d->tcf_tm);
|
||||
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
|
||||
bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
|
||||
|
||||
action = READ_ONCE(d->tcf_action);
|
||||
if (unlikely(action == TC_ACT_SHOT))
|
||||
|
@ -885,7 +885,7 @@ static void qdisc_offload_graft_root(struct net_device *dev,
|
||||
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
|
||||
u32 portid, u32 seq, u16 flags, int event)
|
||||
{
|
||||
struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
|
||||
struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
|
||||
struct gnet_stats_queue __percpu *cpu_qstats = NULL;
|
||||
struct tcmsg *tcm;
|
||||
struct nlmsghdr *nlh;
|
||||
@ -943,8 +943,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
|
||||
cpu_qstats = q->cpu_qstats;
|
||||
}
|
||||
|
||||
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
|
||||
&d, cpu_bstats, &q->bstats) < 0 ||
|
||||
if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
|
||||
gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
|
||||
gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
|
||||
goto nla_put_failure;
|
||||
@ -1265,26 +1264,17 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
|
||||
rcu_assign_pointer(sch->stab, stab);
|
||||
}
|
||||
if (tca[TCA_RATE]) {
|
||||
seqcount_t *running;
|
||||
|
||||
err = -EOPNOTSUPP;
|
||||
if (sch->flags & TCQ_F_MQROOT) {
|
||||
NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
|
||||
goto err_out4;
|
||||
}
|
||||
|
||||
if (sch->parent != TC_H_ROOT &&
|
||||
!(sch->flags & TCQ_F_INGRESS) &&
|
||||
(!p || !(p->flags & TCQ_F_MQROOT)))
|
||||
running = qdisc_root_sleeping_running(sch);
|
||||
else
|
||||
running = &sch->running;
|
||||
|
||||
err = gen_new_estimator(&sch->bstats,
|
||||
sch->cpu_bstats,
|
||||
&sch->rate_est,
|
||||
NULL,
|
||||
running,
|
||||
true,
|
||||
tca[TCA_RATE]);
|
||||
if (err) {
|
||||
NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
|
||||
@ -1360,7 +1350,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
|
||||
sch->cpu_bstats,
|
||||
&sch->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
true,
|
||||
tca[TCA_RATE]);
|
||||
}
|
||||
out:
|
||||
|
@ -52,7 +52,7 @@ struct atm_flow_data {
|
||||
struct atm_qdisc_data *parent; /* parent qdisc */
|
||||
struct socket *sock; /* for closing */
|
||||
int ref; /* reference count */
|
||||
struct gnet_stats_basic_packed bstats;
|
||||
struct gnet_stats_basic_sync bstats;
|
||||
struct gnet_stats_queue qstats;
|
||||
struct list_head list;
|
||||
struct atm_flow_data *excess; /* flow for excess traffic;
|
||||
@ -548,6 +548,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
|
||||
pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
|
||||
INIT_LIST_HEAD(&p->flows);
|
||||
INIT_LIST_HEAD(&p->link.list);
|
||||
gnet_stats_basic_sync_init(&p->link.bstats);
|
||||
list_add(&p->link.list, &p->flows);
|
||||
p->link.q = qdisc_create_dflt(sch->dev_queue,
|
||||
&pfifo_qdisc_ops, sch->handle, extack);
|
||||
@ -652,8 +653,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
|
||||
{
|
||||
struct atm_flow_data *flow = (struct atm_flow_data *)arg;
|
||||
|
||||
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
||||
d, NULL, &flow->bstats) < 0 ||
|
||||
if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 ||
|
||||
gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
|
||||
return -1;
|
||||
|
||||
|
@ -116,7 +116,7 @@ struct cbq_class {
|
||||
long avgidle;
|
||||
long deficit; /* Saved deficit for WRR */
|
||||
psched_time_t penalized;
|
||||
struct gnet_stats_basic_packed bstats;
|
||||
struct gnet_stats_basic_sync bstats;
|
||||
struct gnet_stats_queue qstats;
|
||||
struct net_rate_estimator __rcu *rate_est;
|
||||
struct tc_cbq_xstats xstats;
|
||||
@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q)
|
||||
long avgidle = cl->avgidle;
|
||||
long idle;
|
||||
|
||||
cl->bstats.packets++;
|
||||
cl->bstats.bytes += len;
|
||||
_bstats_update(&cl->bstats, len, 1);
|
||||
|
||||
/*
|
||||
* (now - last) is total time between packet right edges.
|
||||
@ -1384,8 +1383,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
|
||||
if (cl->undertime != PSCHED_PASTPERFECT)
|
||||
cl->xstats.undertime = cl->undertime - q->now;
|
||||
|
||||
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
||||
d, NULL, &cl->bstats) < 0 ||
|
||||
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
|
||||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
|
||||
gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
|
||||
return -1;
|
||||
@ -1519,7 +1517,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
|
||||
err = gen_replace_estimator(&cl->bstats, NULL,
|
||||
&cl->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
true,
|
||||
tca[TCA_RATE]);
|
||||
if (err) {
|
||||
NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
|
||||
@ -1611,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
|
||||
if (cl == NULL)
|
||||
goto failure;
|
||||
|
||||
gnet_stats_basic_sync_init(&cl->bstats);
|
||||
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
|
||||
if (err) {
|
||||
kfree(cl);
|
||||
@ -1619,9 +1618,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
|
||||
|
||||
if (tca[TCA_RATE]) {
|
||||
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
tca[TCA_RATE]);
|
||||
NULL, true, tca[TCA_RATE]);
|
||||
if (err) {
|
||||
NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
|
||||
tcf_block_put(cl->block);
|
||||
|
@ -19,7 +19,7 @@ struct drr_class {
|
||||
struct Qdisc_class_common common;
|
||||
unsigned int filter_cnt;
|
||||
|
||||
struct gnet_stats_basic_packed bstats;
|
||||
struct gnet_stats_basic_sync bstats;
|
||||
struct gnet_stats_queue qstats;
|
||||
struct net_rate_estimator __rcu *rate_est;
|
||||
struct list_head alist;
|
||||
@ -85,8 +85,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
|
||||
if (tca[TCA_RATE]) {
|
||||
err = gen_replace_estimator(&cl->bstats, NULL,
|
||||
&cl->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
NULL, true,
|
||||
tca[TCA_RATE]);
|
||||
if (err) {
|
||||
NL_SET_ERR_MSG(extack, "Failed to replace estimator");
|
||||
@ -106,6 +105,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
|
||||
if (cl == NULL)
|
||||
return -ENOBUFS;
|
||||
|
||||
gnet_stats_basic_sync_init(&cl->bstats);
|
||||
cl->common.classid = classid;
|
||||
cl->quantum = quantum;
|
||||
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
|
||||
@ -118,9 +118,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
|
||||
|
||||
if (tca[TCA_RATE]) {
|
||||
err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
tca[TCA_RATE]);
|
||||
NULL, true, tca[TCA_RATE]);
|
||||
if (err) {
|
||||
NL_SET_ERR_MSG(extack, "Failed to replace estimator");
|
||||
qdisc_put(cl->qdisc);
|
||||
@ -267,8 +265,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
|
||||
if (qlen)
|
||||
xstats.deficit = cl->deficit;
|
||||
|
||||
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
||||
d, NULL, &cl->bstats) < 0 ||
|
||||
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
|
||||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
|
||||
gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
|
||||
return -1;
|
||||
|
@ -41,7 +41,7 @@ struct ets_class {
|
||||
struct Qdisc *qdisc;
|
||||
u32 quantum;
|
||||
u32 deficit;
|
||||
struct gnet_stats_basic_packed bstats;
|
||||
struct gnet_stats_basic_sync bstats;
|
||||
struct gnet_stats_queue qstats;
|
||||
};
|
||||
|
||||
@ -325,8 +325,7 @@ static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
|
||||
struct ets_class *cl = ets_class_from_arg(sch, arg);
|
||||
struct Qdisc *cl_q = cl->qdisc;
|
||||
|
||||
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
||||
d, NULL, &cl_q->bstats) < 0 ||
|
||||
if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats, true) < 0 ||
|
||||
qdisc_qstats_copy(d, cl_q) < 0)
|
||||
return -1;
|
||||
|
||||
@ -689,7 +688,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
|
||||
q->classes[i].qdisc = NULL;
|
||||
q->classes[i].quantum = 0;
|
||||
q->classes[i].deficit = 0;
|
||||
memset(&q->classes[i].bstats, 0, sizeof(q->classes[i].bstats));
|
||||
gnet_stats_basic_sync_init(&q->classes[i].bstats);
|
||||
memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
|
||||
}
|
||||
return 0;
|
||||
|
@ -304,8 +304,8 @@ trace:
|
||||
|
||||
/*
|
||||
* Transmit possibly several skbs, and handle the return status as
|
||||
* required. Owning running seqcount bit guarantees that
|
||||
* only one CPU can execute this function.
|
||||
* required. Owning qdisc running bit guarantees that only one CPU
|
||||
* can execute this function.
|
||||
*
|
||||
* Returns to the caller:
|
||||
* false - hardware queue frozen backoff
|
||||
@ -606,7 +606,6 @@ struct Qdisc noop_qdisc = {
|
||||
.ops = &noop_qdisc_ops,
|
||||
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
|
||||
.dev_queue = &noop_netdev_queue,
|
||||
.running = SEQCNT_ZERO(noop_qdisc.running),
|
||||
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
|
||||
.gso_skb = {
|
||||
.next = (struct sk_buff *)&noop_qdisc.gso_skb,
|
||||
@ -867,7 +866,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
|
||||
EXPORT_SYMBOL(pfifo_fast_ops);
|
||||
|
||||
static struct lock_class_key qdisc_tx_busylock;
|
||||
static struct lock_class_key qdisc_running_key;
|
||||
|
||||
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
|
||||
const struct Qdisc_ops *ops,
|
||||
@ -892,11 +890,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
|
||||
__skb_queue_head_init(&sch->gso_skb);
|
||||
__skb_queue_head_init(&sch->skb_bad_txq);
|
||||
qdisc_skb_head_init(&sch->q);
|
||||
gnet_stats_basic_sync_init(&sch->bstats);
|
||||
spin_lock_init(&sch->q.lock);
|
||||
|
||||
if (ops->static_flags & TCQ_F_CPUSTATS) {
|
||||
sch->cpu_bstats =
|
||||
netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
|
||||
netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
|
||||
if (!sch->cpu_bstats)
|
||||
goto errout1;
|
||||
|
||||
@ -916,10 +915,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
|
||||
lockdep_set_class(&sch->seqlock,
|
||||
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
|
||||
|
||||
seqcount_init(&sch->running);
|
||||
lockdep_set_class(&sch->running,
|
||||
dev->qdisc_running_key ?: &qdisc_running_key);
|
||||
|
||||
sch->ops = ops;
|
||||
sch->flags = ops->static_flags;
|
||||
sch->enqueue = ops->enqueue;
|
||||
|
@ -353,6 +353,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
|
||||
{
|
||||
struct gred_sched *table = qdisc_priv(sch);
|
||||
struct tc_gred_qopt_offload *hw_stats;
|
||||
u64 bytes = 0, packets = 0;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
@ -364,9 +365,11 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
|
||||
hw_stats->handle = sch->handle;
|
||||
hw_stats->parent = sch->parent;
|
||||
|
||||
for (i = 0; i < MAX_DPs; i++)
|
||||
for (i = 0; i < MAX_DPs; i++) {
|
||||
gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
|
||||
if (table->tab[i])
|
||||
hw_stats->stats.xstats[i] = &table->tab[i]->stats;
|
||||
}
|
||||
|
||||
ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
|
||||
/* Even if driver returns failure adjust the stats - in case offload
|
||||
@ -375,19 +378,19 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
|
||||
for (i = 0; i < MAX_DPs; i++) {
|
||||
if (!table->tab[i])
|
||||
continue;
|
||||
table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
|
||||
table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
|
||||
table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
|
||||
table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
|
||||
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
|
||||
|
||||
_bstats_update(&sch->bstats,
|
||||
hw_stats->stats.bstats[i].bytes,
|
||||
hw_stats->stats.bstats[i].packets);
|
||||
bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
|
||||
packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
|
||||
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
|
||||
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
|
||||
sch->qstats.drops += hw_stats->stats.qstats[i].drops;
|
||||
sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
|
||||
sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
|
||||
}
|
||||
_bstats_update(&sch->bstats, bytes, packets);
|
||||
|
||||
kfree(hw_stats);
|
||||
return ret;
|
||||
|
@ -111,7 +111,7 @@ enum hfsc_class_flags {
|
||||
struct hfsc_class {
|
||||
struct Qdisc_class_common cl_common;
|
||||
|
||||
struct gnet_stats_basic_packed bstats;
|
||||
struct gnet_stats_basic_sync bstats;
|
||||
struct gnet_stats_queue qstats;
|
||||
struct net_rate_estimator __rcu *rate_est;
|
||||
struct tcf_proto __rcu *filter_list; /* filter list */
|
||||
@ -965,7 +965,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
|
||||
err = gen_replace_estimator(&cl->bstats, NULL,
|
||||
&cl->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
true,
|
||||
tca[TCA_RATE]);
|
||||
if (err)
|
||||
return err;
|
||||
@ -1033,9 +1033,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
|
||||
|
||||
if (tca[TCA_RATE]) {
|
||||
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
|
||||
NULL,
|
||||
qdisc_root_sleeping_running(sch),
|
||||
tca[TCA_RATE]);
|
||||
NULL, true, tca[TCA_RATE]);
|
||||
if (err) {
|
||||
tcf_block_put(cl->block);
|
||||
kfree(cl);
|
||||
@ -1328,7 +1326,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
|
||||
xstats.work = cl->cl_total;
|
||||
xstats.rtwork = cl->cl_cumul;
|
||||
|
||||
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
|
||||
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
|
||||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
|
||||
gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
|
||||
return -1;
|
||||
@ -1406,6 +1404,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
gnet_stats_basic_sync_init(&q->root.bstats);
|
||||
q->root.cl_common.classid = sch->handle;
|
||||
q->root.sched = q;
|
||||
q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
|
||||
|
@ -113,8 +113,8 @@ struct htb_class {
/*
* Written often fields
*/
struct gnet_stats_basic_packed bstats;
struct gnet_stats_basic_packed bstats_bias;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_basic_sync bstats_bias;
struct tc_htb_xstats xstats; /* our special stats */

/* token bucket parameters */

@ -1308,10 +1308,11 @@ nla_put_failure:
static void htb_offload_aggregate_stats(struct htb_sched *q,
struct htb_class *cl)
{
u64 bytes = 0, packets = 0;
struct htb_class *c;
unsigned int i;

memset(&cl->bstats, 0, sizeof(cl->bstats));
gnet_stats_basic_sync_init(&cl->bstats);

for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {

@ -1323,14 +1324,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
if (p != cl)
continue;

cl->bstats.bytes += c->bstats_bias.bytes;
cl->bstats.packets += c->bstats_bias.packets;
bytes += u64_stats_read(&c->bstats_bias.bytes);
packets += u64_stats_read(&c->bstats_bias.packets);
if (c->level == 0) {
cl->bstats.bytes += c->leaf.q->bstats.bytes;
cl->bstats.packets += c->leaf.q->bstats.packets;
bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
packets += u64_stats_read(&c->leaf.q->bstats.packets);
}
}
}
_bstats_update(&cl->bstats, bytes, packets);
}

static int

@ -1357,16 +1359,16 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (cl->leaf.q)
cl->bstats = cl->leaf.q->bstats;
else
memset(&cl->bstats, 0, sizeof(cl->bstats));
cl->bstats.bytes += cl->bstats_bias.bytes;
cl->bstats.packets += cl->bstats_bias.packets;
gnet_stats_basic_sync_init(&cl->bstats);
_bstats_update(&cl->bstats,
u64_stats_read(&cl->bstats_bias.bytes),
u64_stats_read(&cl->bstats_bias.packets));
} else {
htb_offload_aggregate_stats(q, cl);
}
}

if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
return -1;

@ -1578,8 +1580,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
WARN_ON(old != q);

if (cl->parent) {
cl->parent->bstats_bias.bytes += q->bstats.bytes;
cl->parent->bstats_bias.packets += q->bstats.packets;
_bstats_update(&cl->parent->bstats_bias,
u64_stats_read(&q->bstats.bytes),
u64_stats_read(&q->bstats.packets));
}

offload_opt = (struct tc_htb_qopt_offload) {

@ -1849,6 +1852,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl)
goto failure;

gnet_stats_basic_sync_init(&cl->bstats);
gnet_stats_basic_sync_init(&cl->bstats_bias);

err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);

@ -1858,7 +1864,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
true,
tca[TCA_RATE] ? : &est.nla);
if (err)
goto err_block_put;

@ -1922,8 +1928,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
htb_graft_helper(dev_queue, old_q);
goto err_kill_estimator;
}
parent->bstats_bias.bytes += old_q->bstats.bytes;
parent->bstats_bias.packets += old_q->bstats.packets;
_bstats_update(&parent->bstats_bias,
u64_stats_read(&old_q->bstats.bytes),
u64_stats_read(&old_q->bstats.packets));
qdisc_put(old_q);
}
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,

@ -1983,7 +1990,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
true,
tca[TCA_RATE]);
if (err)
return err;
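[Illustrative sketch, not part of the diff: the htb hunks above stop adding the bytes/packets fields directly and instead read each source counter with u64_stats_read() and fold the totals into the destination with a single _bstats_update(). A condensed version of that pattern, with stand-in names:]

#include <net/gen_stats.h>

static void example_aggregate(struct gnet_stats_basic_sync *dst,
			      const struct gnet_stats_basic_sync *src,
			      unsigned int n)
{
	u64 bytes = 0, packets = 0;
	unsigned int i;

	/* replaces the old memset(); also initialises the embedded syncp */
	gnet_stats_basic_sync_init(dst);

	for (i = 0; i < n; i++) {
		bytes   += u64_stats_read(&src[i].bytes);
		packets += u64_stats_read(&src[i].packets);
	}

	/* one writer-side update, inside the u64_stats write section */
	_bstats_update(dst, bytes, packets);
}
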
@ -130,10 +130,9 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *qdisc;
unsigned int ntx;
__u32 qlen = 0;

sch->q.qlen = 0;
memset(&sch->bstats, 0, sizeof(sch->bstats));
gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats));

/* MQ supports lockless qdiscs. However, statistics accounting needs

@ -145,25 +144,11 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
spin_lock_bh(qdisc_lock(qdisc));

if (qdisc_is_percpu_stats(qdisc)) {
qlen = qdisc_qlen_sum(qdisc);
__gnet_stats_copy_basic(NULL, &sch->bstats,
qdisc->cpu_bstats,
&qdisc->bstats);
__gnet_stats_copy_queue(&sch->qstats,
qdisc->cpu_qstats,
&qdisc->qstats, qlen);
sch->q.qlen += qlen;
} else {
sch->q.qlen += qdisc->q.qlen;
sch->bstats.bytes += qdisc->bstats.bytes;
sch->bstats.packets += qdisc->bstats.packets;
sch->qstats.qlen += qdisc->qstats.qlen;
sch->qstats.backlog += qdisc->qstats.backlog;
sch->qstats.drops += qdisc->qstats.drops;
sch->qstats.requeues += qdisc->qstats.requeues;
sch->qstats.overlimits += qdisc->qstats.overlimits;
}
gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
&qdisc->bstats, false);
gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
&qdisc->qstats);
sch->q.qlen += qdisc_qlen(qdisc);

spin_unlock_bh(qdisc_lock(qdisc));
}

@ -246,8 +231,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
&sch->bstats) < 0 ||
if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
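[Illustrative sketch, not part of the diff: mq_dump() above drops the open-coded percpu/non-percpu branches because gnet_stats_add_basic() and gnet_stats_add_queue() accept both the per-CPU and the plain counters and use whichever the child provides. The loop below shows that shape with stand-in names; the "false" argument mirrors what the mq/mqprio hunks pass on this path.]

#include <net/sch_generic.h>
#include <net/gen_stats.h>

static void example_sum_children(struct Qdisc *sch, struct Qdisc **children,
				 unsigned int n)
{
	unsigned int i;

	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));
	sch->q.qlen = 0;

	for (i = 0; i < n; i++) {
		struct Qdisc *qdisc = children[i];

		spin_lock_bh(qdisc_lock(qdisc));
		/* handles qdisc->cpu_bstats == NULL as well */
		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);
		spin_unlock_bh(qdisc_lock(qdisc));
	}
}
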
@ -390,7 +390,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
unsigned int ntx, tc;

sch->q.qlen = 0;
memset(&sch->bstats, 0, sizeof(sch->bstats));
gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats));

/* MQ supports lockless qdiscs. However, statistics accounting needs

@ -402,25 +402,11 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
spin_lock_bh(qdisc_lock(qdisc));

if (qdisc_is_percpu_stats(qdisc)) {
__u32 qlen = qdisc_qlen_sum(qdisc);

__gnet_stats_copy_basic(NULL, &sch->bstats,
qdisc->cpu_bstats,
&qdisc->bstats);
__gnet_stats_copy_queue(&sch->qstats,
qdisc->cpu_qstats,
&qdisc->qstats, qlen);
sch->q.qlen += qlen;
} else {
sch->q.qlen += qdisc->q.qlen;
sch->bstats.bytes += qdisc->bstats.bytes;
sch->bstats.packets += qdisc->bstats.packets;
sch->qstats.backlog += qdisc->qstats.backlog;
sch->qstats.drops += qdisc->qstats.drops;
sch->qstats.requeues += qdisc->qstats.requeues;
sch->qstats.overlimits += qdisc->qstats.overlimits;
}
gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
&qdisc->bstats, false);
gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
&qdisc->qstats);
sch->q.qlen += qdisc_qlen(qdisc);

spin_unlock_bh(qdisc_lock(qdisc));
}

@ -512,12 +498,13 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
{
if (cl >= TC_H_MIN_PRIORITY) {
int i;
__u32 qlen = 0;
__u32 qlen;
struct gnet_stats_queue qstats = {0};
struct gnet_stats_basic_packed bstats = {0};
struct gnet_stats_basic_sync bstats;
struct net_device *dev = qdisc_dev(sch);
struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

gnet_stats_basic_sync_init(&bstats);
/* Drop lock here it will be reclaimed before touching
* statistics this is required because the d->lock we
* hold here is the look on dev_queue->qdisc_sleeping

@ -532,40 +519,28 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,

spin_lock_bh(qdisc_lock(qdisc));

if (qdisc_is_percpu_stats(qdisc)) {
qlen = qdisc_qlen_sum(qdisc);
gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
&qdisc->bstats, false);
gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
&qdisc->qstats);
sch->q.qlen += qdisc_qlen(qdisc);

__gnet_stats_copy_basic(NULL, &bstats,
qdisc->cpu_bstats,
&qdisc->bstats);
__gnet_stats_copy_queue(&qstats,
qdisc->cpu_qstats,
&qdisc->qstats,
qlen);
} else {
qlen += qdisc->q.qlen;
bstats.bytes += qdisc->bstats.bytes;
bstats.packets += qdisc->bstats.packets;
qstats.backlog += qdisc->qstats.backlog;
qstats.drops += qdisc->qstats.drops;
qstats.requeues += qdisc->qstats.requeues;
qstats.overlimits += qdisc->qstats.overlimits;
}
spin_unlock_bh(qdisc_lock(qdisc));
}
qlen = qdisc_qlen(sch) + qstats.qlen;

/* Reclaim root sleeping lock before completing stats */
if (d->lock)
spin_lock_bh(d->lock);
if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1;
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
sch->cpu_bstats, &sch->bstats) < 0 ||
if (gnet_stats_copy_basic(d, sch->cpu_bstats,
&sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
}
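[Illustrative sketch, not part of the diff: gnet_stats_copy_basic() loses its seqcount argument and gains a trailing bool throughout these files. As the hunks above use it, the bool is true when copying live per-qdisc counters and false when copying a private on-stack aggregate that nothing else is writing to. Stand-in function; the old call is kept as a comment for contrast.]

#include <net/sch_generic.h>
#include <net/gen_stats.h>

static int example_copy_stats(struct gnet_dump *d, struct Qdisc *sch,
			      struct gnet_stats_basic_sync *stack_copy)
{
	/* old: gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
	 *			      sch->cpu_bstats, &sch->bstats)
	 */
	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0)
		return -1;

	/* an on-stack aggregate with no concurrent writer */
	if (gnet_stats_copy_basic(d, NULL, stack_copy, false) < 0)
		return -1;

	return 0;
}
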
@ -338,8 +338,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;

cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
@ -361,8 +361,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;

cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
if (gnet_stats_copy_basic(d, cl_q->cpu_bstats,
&cl_q->bstats, true) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
@ -131,7 +131,7 @@ struct qfq_class {

unsigned int filter_cnt;

struct gnet_stats_basic_packed bstats;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct Qdisc *qdisc;

@ -451,7 +451,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
true,
tca[TCA_RATE]);
if (err)
return err;

@ -465,6 +465,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;

gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid;
cl->deficit = lmax;

@ -477,7 +478,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
true,
tca[TCA_RATE]);
if (err)
goto destroy_class;

@ -639,8 +640,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.weight = cl->agg->class_weight;
xstats.lmax = cl->agg->lmax;

if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
qdisc_qstats_copy(d, cl->qdisc) < 0)
return -1;

@ -1234,8 +1234,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}

cl->bstats.bytes += len;
cl->bstats.packets += gso_segs;
_bstats_update(&cl->bstats, len, gso_segs);
sch->qstats.backlog += len;
++sch->q.qlen;
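[Illustrative sketch, not part of the diff: the qfq_enqueue() hunk above replaces the two direct additions with one _bstats_update(), which performs the update inside the u64_stats writer section of the class counters. Stand-in helper:]

#include <net/gen_stats.h>

static void example_account(struct gnet_stats_basic_sync *bstats,
			    unsigned int len, unsigned int gso_segs)
{
	/* old:
	 *	bstats->bytes   += len;
	 *	bstats->packets += gso_segs;
	 */
	_bstats_update(bstats, len, gso_segs);
}
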
@ -1977,7 +1977,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;