Merge branch 'net-add-and-use-dev_get_tstats64'
Heiner Kallweit says:

====================
net: add and use dev_get_tstats64

It's a frequent pattern to use netdev->stats for the less frequently
accessed counters and per-cpu counters for the frequently accessed
counters (rx/tx bytes/packets). Add a default ndo_get_stats64()
implementation for this use case. Subsequently switch more drivers to
use this pattern.

v2:
- add patches for replacing ip_tunnel_get_stats64
  Requested additional migrations will come in a separate series.

v3:
- add atomic_long_t member rx_frame_errors in patch 3 for making
  counter updates atomic
====================

Link: https://lore.kernel.org/r/99273e2f-c218-cd19-916e-9161d8ad8c56@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 52643b7832
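For context, a minimal sketch of the pattern the cover letter describes, using a hypothetical driver "foo" (the foo_* names are illustrative only; dev->tstats, dev_sw_netstats_tx_add() and dev_get_tstats64() are the real interfaces touched by this series):

```c
#include <linux/netdevice.h>

/* Hot rx/tx packet/byte counters live in the per-cpu dev->tstats,
 * rarely touched error/drop counters stay in netdev->stats, and
 * dev_get_tstats64() folds both into struct rtnl_link_stats64.
 */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* hot path: bump the per-cpu software counters */
        dev_sw_netstats_tx_add(dev, 1, skb->len);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int foo_init(struct net_device *dev)
{
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        return dev->tstats ? 0 : -ENOMEM;
}

static void foo_uninit(struct net_device *dev)
{
        free_percpu(dev->tstats);
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_init        = foo_init,
        .ndo_uninit      = foo_uninit,
        .ndo_start_xmit  = foo_xmit,
        .ndo_get_stats64 = dev_get_tstats64,    /* new core helper */
};
```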
@@ -510,7 +510,7 @@ static const struct net_device_ops bareudp_netdev_ops = {
         .ndo_open = bareudp_open,
         .ndo_stop = bareudp_stop,
         .ndo_start_xmit = bareudp_xmit,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
 };
 
@@ -1138,7 +1138,7 @@ static const struct net_device_ops geneve_netdev_ops = {
         .ndo_open = geneve_open,
         .ndo_stop = geneve_stop,
         .ndo_start_xmit = geneve_xmit,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_change_mtu = geneve_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_mac_address = eth_mac_addr,
@@ -607,7 +607,7 @@ static const struct net_device_ops gtp_netdev_ops = {
         .ndo_init = gtp_dev_init,
         .ndo_uninit = gtp_dev_uninit,
         .ndo_start_xmit = gtp_dev_xmit,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
 };
 
 static void gtp_link_setup(struct net_device *dev)
@@ -107,17 +107,6 @@ struct tap_filter {
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
-struct tun_pcpu_stats {
-        u64_stats_t rx_packets;
-        u64_stats_t rx_bytes;
-        u64_stats_t tx_packets;
-        u64_stats_t tx_bytes;
-        struct u64_stats_sync syncp;
-        u32 rx_dropped;
-        u32 tx_dropped;
-        u32 rx_frame_errors;
-};
-
 /* A tun_file connects an open character device to a tuntap netdevice. It
  * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -207,7 +196,7 @@ struct tun_struct {
         void *security;
         u32 flow_count;
         u32 rx_batched;
-        struct tun_pcpu_stats __percpu *pcpu_stats;
+        atomic_long_t rx_frame_errors;
         struct bpf_prog __rcu *xdp_prog;
         struct tun_prog __rcu *steering_prog;
         struct tun_prog __rcu *filter_prog;
@@ -1066,7 +1055,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 
 drop:
-        this_cpu_inc(tun->pcpu_stats->tx_dropped);
+        atomic_long_inc(&dev->tx_dropped);
         skb_tx_error(skb);
         kfree_skb(skb);
         rcu_read_unlock();
@@ -1103,37 +1092,12 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
 static void
 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-        u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
         struct tun_struct *tun = netdev_priv(dev);
-        struct tun_pcpu_stats *p;
-        int i;
 
-        for_each_possible_cpu(i) {
-                u64 rxpackets, rxbytes, txpackets, txbytes;
-                unsigned int start;
+        dev_get_tstats64(dev, stats);
 
-                p = per_cpu_ptr(tun->pcpu_stats, i);
-                do {
-                        start = u64_stats_fetch_begin(&p->syncp);
-                        rxpackets = u64_stats_read(&p->rx_packets);
-                        rxbytes = u64_stats_read(&p->rx_bytes);
-                        txpackets = u64_stats_read(&p->tx_packets);
-                        txbytes = u64_stats_read(&p->tx_bytes);
-                } while (u64_stats_fetch_retry(&p->syncp, start));
-
-                stats->rx_packets += rxpackets;
-                stats->rx_bytes += rxbytes;
-                stats->tx_packets += txpackets;
-                stats->tx_bytes += txbytes;
-
-                /* u32 counters */
-                rx_dropped += p->rx_dropped;
-                rx_frame_errors += p->rx_frame_errors;
-                tx_dropped += p->tx_dropped;
-        }
-        stats->rx_dropped = rx_dropped;
-        stats->rx_frame_errors = rx_frame_errors;
-        stats->tx_dropped = tx_dropped;
+        stats->rx_frame_errors +=
+                (unsigned long)atomic_long_read(&tun->rx_frame_errors);
 }
 
 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
@@ -1247,7 +1211,7 @@ resample:
                 void *frame = tun_xdp_to_ptr(xdp);
 
                 if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
-                        this_cpu_inc(tun->pcpu_stats->tx_dropped);
+                        atomic_long_inc(&dev->tx_dropped);
                         xdp_return_frame_rx_napi(xdp);
                         drops++;
                 }
@@ -1283,7 +1247,7 @@ static const struct net_device_ops tap_netdev_ops = {
         .ndo_select_queue = tun_select_queue,
         .ndo_features_check = passthru_features_check,
         .ndo_set_rx_headroom = tun_set_headroom,
-        .ndo_get_stats64 = tun_net_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_bpf = tun_xdp,
         .ndo_xdp_xmit = tun_xdp_xmit,
         .ndo_change_carrier = tun_net_change_carrier,
@@ -1577,7 +1541,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
                 trace_xdp_exception(tun->dev, xdp_prog, act);
                 fallthrough;
         case XDP_DROP:
-                this_cpu_inc(tun->pcpu_stats->rx_dropped);
+                atomic_long_inc(&tun->dev->rx_dropped);
                 break;
         }
 
@@ -1683,7 +1647,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
         size_t total_len = iov_iter_count(from);
         size_t len = total_len, align = tun->align, linear;
         struct virtio_net_hdr gso = { 0 };
-        struct tun_pcpu_stats *stats;
         int good_linear;
         int copylen;
         bool zerocopy = false;
@@ -1752,7 +1715,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                  */
                 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
                 if (IS_ERR(skb)) {
-                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
+                        atomic_long_inc(&tun->dev->rx_dropped);
                         return PTR_ERR(skb);
                 }
                 if (!skb)
@@ -1781,7 +1744,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                 if (IS_ERR(skb)) {
                         if (PTR_ERR(skb) != -EAGAIN)
-                                this_cpu_inc(tun->pcpu_stats->rx_dropped);
+                                atomic_long_inc(&tun->dev->rx_dropped);
                         if (frags)
                                 mutex_unlock(&tfile->napi_mutex);
                         return PTR_ERR(skb);
@@ -1795,7 +1758,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                 if (err) {
                         err = -EFAULT;
 drop:
-                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
+                        atomic_long_inc(&tun->dev->rx_dropped);
                         kfree_skb(skb);
                         if (frags) {
                                 tfile->napi.skb = NULL;
@@ -1807,7 +1770,7 @@ drop:
         }
 
         if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
-                this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+                atomic_long_inc(&tun->rx_frame_errors);
                 kfree_skb(skb);
                 if (frags) {
                         tfile->napi.skb = NULL;
@@ -1830,7 +1793,7 @@ drop:
                         pi.proto = htons(ETH_P_IPV6);
                         break;
                 default:
-                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
+                        atomic_long_inc(&tun->dev->rx_dropped);
                         kfree_skb(skb);
                         return -EINVAL;
                 }
@@ -1910,7 +1873,7 @@ drop:
                                           skb_headlen(skb));
 
                 if (unlikely(headlen > skb_headlen(skb))) {
-                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
+                        atomic_long_inc(&tun->dev->rx_dropped);
                         napi_free_frags(&tfile->napi);
                         rcu_read_unlock();
                         mutex_unlock(&tfile->napi_mutex);
@@ -1942,12 +1905,9 @@ drop:
         }
         rcu_read_unlock();
 
-        stats = get_cpu_ptr(tun->pcpu_stats);
-        u64_stats_update_begin(&stats->syncp);
-        u64_stats_inc(&stats->rx_packets);
-        u64_stats_add(&stats->rx_bytes, len);
-        u64_stats_update_end(&stats->syncp);
-        put_cpu_ptr(stats);
+        preempt_disable();
+        dev_sw_netstats_rx_add(tun->dev, len);
+        preempt_enable();
 
         if (rxhash)
                 tun_flow_update(tun, rxhash, tfile);
@@ -1979,7 +1939,6 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
 {
         int vnet_hdr_sz = 0;
         size_t size = xdp_frame->len;
-        struct tun_pcpu_stats *stats;
         size_t ret;
 
         if (tun->flags & IFF_VNET_HDR) {
@@ -1996,12 +1955,9 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
 
         ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
 
-        stats = get_cpu_ptr(tun->pcpu_stats);
-        u64_stats_update_begin(&stats->syncp);
-        u64_stats_inc(&stats->tx_packets);
-        u64_stats_add(&stats->tx_bytes, ret);
-        u64_stats_update_end(&stats->syncp);
-        put_cpu_ptr(tun->pcpu_stats);
+        preempt_disable();
+        dev_sw_netstats_tx_add(tun->dev, 1, ret);
+        preempt_enable();
 
         return ret;
 }
@@ -2013,7 +1969,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                             struct iov_iter *iter)
 {
         struct tun_pi pi = { 0, skb->protocol };
-        struct tun_pcpu_stats *stats;
         ssize_t total;
         int vlan_offset = 0;
         int vlan_hlen = 0;
@@ -2091,12 +2046,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
 done:
         /* caller is in process context, */
-        stats = get_cpu_ptr(tun->pcpu_stats);
-        u64_stats_update_begin(&stats->syncp);
-        u64_stats_inc(&stats->tx_packets);
-        u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
-        u64_stats_update_end(&stats->syncp);
-        put_cpu_ptr(tun->pcpu_stats);
+        preempt_disable();
+        dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
+        preempt_enable();
 
         return total;
 }
@@ -2235,11 +2187,11 @@ static void tun_free_netdev(struct net_device *dev)
 
         BUG_ON(!(list_empty(&tun->disabled)));
 
-        free_percpu(tun->pcpu_stats);
-        /* We clear pcpu_stats so that tun_set_iff() can tell if
+        free_percpu(dev->tstats);
+        /* We clear tstats so that tun_set_iff() can tell if
          * tun_free_netdev() has been called from register_netdevice().
          */
-        tun->pcpu_stats = NULL;
+        dev->tstats = NULL;
 
         tun_flow_uninit(tun);
         security_tun_dev_free_security(tun->security);
@@ -2370,7 +2322,6 @@ static int tun_xdp_one(struct tun_struct *tun,
         unsigned int datasize = xdp->data_end - xdp->data;
         struct tun_xdp_hdr *hdr = xdp->data_hard_start;
         struct virtio_net_hdr *gso = &hdr->gso;
-        struct tun_pcpu_stats *stats;
         struct bpf_prog *xdp_prog;
         struct sk_buff *skb = NULL;
         u32 rxhash = 0, act;
@@ -2428,7 +2379,7 @@ build:
         skb_put(skb, xdp->data_end - xdp->data);
 
         if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
-                this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+                atomic_long_inc(&tun->rx_frame_errors);
                 kfree_skb(skb);
                 err = -EINVAL;
                 goto out;
@@ -2451,14 +2402,10 @@ build:
 
         netif_receive_skb(skb);
 
-        /* No need for get_cpu_ptr() here since this function is
+        /* No need to disable preemption here since this function is
          * always called with bh disabled
          */
-        stats = this_cpu_ptr(tun->pcpu_stats);
-        u64_stats_update_begin(&stats->syncp);
-        u64_stats_inc(&stats->rx_packets);
-        u64_stats_add(&stats->rx_bytes, datasize);
-        u64_stats_update_end(&stats->syncp);
+        dev_sw_netstats_rx_add(tun->dev, datasize);
 
         if (rxhash)
                 tun_flow_update(tun, rxhash, tfile);
@@ -2751,8 +2698,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                 tun->rx_batched = 0;
                 RCU_INIT_POINTER(tun->steering_prog, NULL);
 
-                tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
-                if (!tun->pcpu_stats) {
+                dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+                if (!dev->tstats) {
                         err = -ENOMEM;
                         goto err_free_dev;
                 }
@@ -2807,16 +2754,16 @@ err_detach:
         tun_detach_all(dev);
         /* We are here because register_netdevice() has failed.
          * If register_netdevice() already called tun_free_netdev()
-         * while dealing with the error, tun->pcpu_stats has been cleared.
+         * while dealing with the error, dev->tstats has been cleared.
          */
-        if (!tun->pcpu_stats)
+        if (!dev->tstats)
                 goto err_free_dev;
 
 err_free_flow:
         tun_flow_uninit(tun);
         security_tun_dev_free_security(tun->security);
 err_free_stat:
-        free_percpu(tun->pcpu_stats);
+        free_percpu(dev->tstats);
 err_free_dev:
         free_netdev(dev);
         return err;
@@ -3211,7 +3211,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
         .ndo_open = vxlan_open,
         .ndo_stop = vxlan_stop,
         .ndo_start_xmit = vxlan_xmit,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_set_rx_mode = vxlan_set_multicast_list,
         .ndo_change_mtu = vxlan_change_mtu,
         .ndo_validate_addr = eth_validate_addr,
@@ -3230,7 +3230,7 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
         .ndo_open = vxlan_open,
         .ndo_stop = vxlan_stop,
         .ndo_start_xmit = vxlan_xmit,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_change_mtu = vxlan_change_mtu,
         .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
 };
@@ -215,7 +215,7 @@ static const struct net_device_ops netdev_ops = {
         .ndo_open = wg_open,
         .ndo_stop = wg_stop,
         .ndo_start_xmit = wg_xmit,
-        .ndo_get_stats64 = ip_tunnel_get_stats64
+        .ndo_get_stats64 = dev_get_tstats64
 };
 
 static void wg_destruct(struct net_device *dev)
@@ -4527,6 +4527,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                              const struct net_device_stats *netdev_stats);
 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
                            const struct pcpu_sw_netstats __percpu *netstats);
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
 
 extern int netdev_max_backlog;
 extern int netdev_tstamp_prequeue;
@@ -274,8 +274,6 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
-void ip_tunnel_get_stats64(struct net_device *dev,
-                           struct rtnl_link_stats64 *tot);
 struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                    int link, __be16 flags,
                                    __be32 remote, __be32 local,
@@ -10366,6 +10366,21 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
 }
 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
 
+/**
+ * dev_get_tstats64 - ndo_get_stats64 implementation
+ * @dev: device to get statistics from
+ * @s: place to store stats
+ *
+ * Populate @s from dev->stats and dev->tstats. Can be used as
+ * ndo_get_stats64() callback.
+ */
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
+{
+        netdev_stats_to_stats64(s, &dev->stats);
+        dev_fetch_sw_netstats(s, dev->tstats);
+}
+EXPORT_SYMBOL_GPL(dev_get_tstats64);
+
 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 {
         struct netdev_queue *queue = dev_ingress_queue(dev);
@@ -201,7 +201,6 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
 {
         struct dsa_port *cpu_dp = dev->dsa_ptr;
         struct sk_buff *nskb = NULL;
-        struct pcpu_sw_netstats *s;
         struct dsa_slave_priv *p;
 
         if (unlikely(!cpu_dp)) {
@@ -234,11 +233,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
                 skb = nskb;
         }
 
-        s = this_cpu_ptr(p->stats64);
-        u64_stats_update_begin(&s->syncp);
-        s->rx_packets++;
-        s->rx_bytes += skb->len;
-        u64_stats_update_end(&s->syncp);
+        dev_sw_netstats_rx_add(skb->dev, skb->len);
 
         if (dsa_skb_defer_rx_timestamp(p, skb))
                 return 0;
@@ -78,8 +78,6 @@ struct dsa_slave_priv {
         struct sk_buff * (*xmit)(struct sk_buff *skb,
                                  struct net_device *dev);
 
-        struct pcpu_sw_netstats __percpu *stats64;
-
         struct gro_cells gcells;
 
         /* DSA port data, such as switch, port index, etc. */
@@ -575,14 +575,9 @@ static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct dsa_slave_priv *p = netdev_priv(dev);
-        struct pcpu_sw_netstats *s;
         struct sk_buff *nskb;
 
-        s = this_cpu_ptr(p->stats64);
-        u64_stats_update_begin(&s->syncp);
-        s->tx_packets++;
-        s->tx_bytes += skb->len;
-        u64_stats_update_end(&s->syncp);
+        dev_sw_netstats_tx_add(dev, 1, skb->len);
 
         DSA_SKB_CB(skb)->clone = NULL;
 
@@ -714,7 +709,6 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
                                         uint64_t *data)
 {
         struct dsa_port *dp = dsa_slave_to_port(dev);
-        struct dsa_slave_priv *p = netdev_priv(dev);
         struct dsa_switch *ds = dp->ds;
         struct pcpu_sw_netstats *s;
         unsigned int start;
@@ -723,7 +717,7 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
         for_each_possible_cpu(i) {
                 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
 
-                s = per_cpu_ptr(p->stats64, i);
+                s = per_cpu_ptr(dev->tstats, i);
                 do {
                         start = u64_stats_fetch_begin_irq(&s->syncp);
                         tx_packets = s->tx_packets;
@@ -1252,15 +1246,6 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
         return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
 }
 
-static void dsa_slave_get_stats64(struct net_device *dev,
-                                  struct rtnl_link_stats64 *stats)
-{
-        struct dsa_slave_priv *p = netdev_priv(dev);
-
-        netdev_stats_to_stats64(stats, &dev->stats);
-        dev_fetch_sw_netstats(stats, p->stats64);
-}
-
 static int dsa_slave_get_rxnfc(struct net_device *dev,
                                struct ethtool_rxnfc *nfc, u32 *rule_locs)
 {
@@ -1636,7 +1621,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
 #endif
         .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
         .ndo_setup_tc = dsa_slave_setup_tc,
-        .ndo_get_stats64 = dsa_slave_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
         .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
         .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
@@ -1846,8 +1831,8 @@ int dsa_slave_create(struct dsa_port *port)
         slave_dev->vlan_features = master->vlan_features;
 
         p = netdev_priv(slave_dev);
-        p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-        if (!p->stats64) {
+        slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+        if (!slave_dev->tstats) {
                 free_netdev(slave_dev);
                 return -ENOMEM;
         }
@@ -1909,7 +1894,7 @@ out_phy:
 out_gcells:
         gro_cells_destroy(&p->gcells);
 out_free:
-        free_percpu(p->stats64);
+        free_percpu(slave_dev->tstats);
         free_netdev(slave_dev);
         port->slave = NULL;
         return ret;
@@ -1931,7 +1916,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
         dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
         phylink_destroy(dp->pl);
         gro_cells_destroy(&p->gcells);
-        free_percpu(p->stats64);
+        free_percpu(slave_dev->tstats);
         free_netdev(slave_dev);
 }
 
@@ -920,7 +920,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
         .ndo_start_xmit = ipgre_xmit,
         .ndo_do_ioctl = ip_tunnel_ioctl,
         .ndo_change_mtu = ip_tunnel_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip_tunnel_get_iflink,
         .ndo_tunnel_ctl = ipgre_tunnel_ctl,
 };
@@ -1275,7 +1275,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
         .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_change_mtu = ip_tunnel_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip_tunnel_get_iflink,
         .ndo_fill_metadata_dst = gre_fill_metadata_dst,
 };
@@ -1308,7 +1308,7 @@ static const struct net_device_ops erspan_netdev_ops = {
         .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_change_mtu = ip_tunnel_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip_tunnel_get_iflink,
         .ndo_fill_metadata_dst = gre_fill_metadata_dst,
 };
@@ -429,15 +429,6 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
 }
 EXPORT_SYMBOL(skb_tunnel_check_pmtu);
 
-/* Often modified stats are per cpu, other are shared (netdev->stats) */
-void ip_tunnel_get_stats64(struct net_device *dev,
-                           struct rtnl_link_stats64 *tot)
-{
-        netdev_stats_to_stats64(tot, &dev->stats);
-        dev_fetch_sw_netstats(tot, dev->tstats);
-}
-EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
-
 static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
         [LWTUNNEL_IP_UNSPEC] = { .strict_start_type = LWTUNNEL_IP_OPTS },
         [LWTUNNEL_IP_ID] = { .type = NLA_U64 },
@@ -404,7 +404,7 @@ static const struct net_device_ops vti_netdev_ops = {
         .ndo_start_xmit = vti_tunnel_xmit,
         .ndo_do_ioctl = ip_tunnel_ioctl,
         .ndo_change_mtu = ip_tunnel_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip_tunnel_get_iflink,
         .ndo_tunnel_ctl = vti_tunnel_ctl,
 };
@@ -347,7 +347,7 @@ static const struct net_device_ops ipip_netdev_ops = {
         .ndo_start_xmit = ipip_tunnel_xmit,
         .ndo_do_ioctl = ip_tunnel_ioctl,
         .ndo_change_mtu = ip_tunnel_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip_tunnel_get_iflink,
         .ndo_tunnel_ctl = ipip_tunnel_ctl,
 };
@@ -1391,7 +1391,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
         .ndo_start_xmit = ip6gre_tunnel_xmit,
         .ndo_do_ioctl = ip6gre_tunnel_ioctl,
         .ndo_change_mtu = ip6_tnl_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
@@ -1828,7 +1828,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
         .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_change_mtu = ip6_tnl_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
@@ -1896,7 +1896,7 @@ static const struct net_device_ops ip6erspan_netdev_ops = {
         .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_change_mtu = ip6_tnl_change_mtu,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
@@ -94,36 +94,6 @@ static inline int ip6_tnl_mpls_supported(void)
         return IS_ENABLED(CONFIG_MPLS);
 }
 
-static struct net_device_stats *ip6_get_stats(struct net_device *dev)
-{
-        struct pcpu_sw_netstats tmp, sum = { 0 };
-        int i;
-
-        for_each_possible_cpu(i) {
-                unsigned int start;
-                const struct pcpu_sw_netstats *tstats =
-                        per_cpu_ptr(dev->tstats, i);
-
-                do {
-                        start = u64_stats_fetch_begin_irq(&tstats->syncp);
-                        tmp.rx_packets = tstats->rx_packets;
-                        tmp.rx_bytes = tstats->rx_bytes;
-                        tmp.tx_packets = tstats->tx_packets;
-                        tmp.tx_bytes = tstats->tx_bytes;
-                } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
-
-                sum.rx_packets += tmp.rx_packets;
-                sum.rx_bytes += tmp.rx_bytes;
-                sum.tx_packets += tmp.tx_packets;
-                sum.tx_bytes += tmp.tx_bytes;
-        }
-        dev->stats.rx_packets = sum.rx_packets;
-        dev->stats.rx_bytes = sum.rx_bytes;
-        dev->stats.tx_packets = sum.tx_packets;
-        dev->stats.tx_bytes = sum.tx_bytes;
-        return &dev->stats;
-}
-
 #define for_each_ip6_tunnel_rcu(start) \
         for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
@@ -1834,7 +1804,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
         .ndo_start_xmit = ip6_tnl_start_xmit,
         .ndo_do_ioctl = ip6_tnl_ioctl,
         .ndo_change_mtu = ip6_tnl_change_mtu,
-        .ndo_get_stats = ip6_get_stats,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
@@ -890,7 +890,7 @@ static const struct net_device_ops vti6_netdev_ops = {
         .ndo_uninit = vti6_dev_uninit,
         .ndo_start_xmit = vti6_tnl_xmit,
         .ndo_do_ioctl = vti6_ioctl,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
@@ -1396,7 +1396,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
         .ndo_uninit = ipip6_tunnel_uninit,
         .ndo_start_xmit = sit_tunnel_xmit,
         .ndo_do_ioctl = ipip6_tunnel_ioctl,
-        .ndo_get_stats64 = ip_tunnel_get_stats64,
+        .ndo_get_stats64 = dev_get_tstats64,
         .ndo_get_iflink = ip_tunnel_get_iflink,
         .ndo_tunnel_ctl = ipip6_tunnel_ctl,
 };
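As a closing note on the v3 item in the cover letter: struct pcpu_sw_netstats only carries rx/tx packet and byte counters, so tun keeps its rx_frame_errors counter in an atomic_long_t and folds it in after dev_get_tstats64(). A minimal sketch of that counter pattern follows; the foo_* names are hypothetical, the helpers are the ones used in the tun hunks above.

```c
#include <linux/atomic.h>
#include <linux/netdevice.h>

struct foo_priv {
        atomic_long_t rx_frame_errors;  /* analogous to tun->rx_frame_errors */
};

static void foo_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *stats)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* netdev->stats plus the per-cpu dev->tstats counters */
        dev_get_tstats64(dev, stats);
        /* rarely hit error counter, updated atomically on the error path */
        stats->rx_frame_errors +=
                (unsigned long)atomic_long_read(&priv->rx_frame_errors);
}
```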