net: thunderbolt: Add tracepoints
These are useful when debugging various performance issues.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Acked-by: Yehezkel Bernat <YehezkelShB@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit f758652703
parent 7b3502c159
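Each DEFINE_EVENT in the new trace.h generates a trace_<name>() inline that the driver calls from its data path; the call is guarded by a static key, so it costs next to nothing while the event is disabled. As a rough sketch of the call-site shape (tbnet_example_xmit() below is hypothetical and only illustrates the pattern; the real call sites are in the main.c hunks that follow):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "trace.h"

/* Hypothetical transmit helper: shows where a tx_skb event would fire. */
static netdev_tx_t tbnet_example_xmit(struct sk_buff *skb)
{
	trace_tbnet_tx_skb(skb);	/* near no-op while the event is disabled */

	/* ... split the skb into Thunderbolt frames and queue them ... */
	return NETDEV_TX_OK;
}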
--- a/drivers/net/thunderbolt/Makefile
+++ b/drivers/net/thunderbolt/Makefile
@@ -1,3 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_USB4_NET) := thunderbolt_net.o
-thunderbolt_net-objs := main.o
+thunderbolt_net-objs := main.o trace.o
+
+# Tracepoints need to know where to find trace.h
+CFLAGS_trace.o := -I$(src)
--- a/drivers/net/thunderbolt/main.c
+++ b/drivers/net/thunderbolt/main.c
@@ -23,6 +23,8 @@
 
 #include <net/ip6_checksum.h>
 
+#include "trace.h"
+
 /* Protocol timeouts in ms */
 #define TBNET_LOGIN_DELAY	4500
 #define TBNET_LOGIN_TIMEOUT	500
@@ -353,6 +355,8 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
 			size = TBNET_RX_PAGE_SIZE;
 		}
 
+		trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
+
 		if (tf->frame.buffer_phy)
 			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
 				       dir);
@@ -526,6 +530,9 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
 		tf->frame.buffer_phy = dma_addr;
 		tf->dev = net->dev;
 
+		trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
+					   DMA_FROM_DEVICE);
+
 		tb_ring_rx(ring->ring, &tf->frame);
 
 		ring->prod++;
@@ -602,6 +609,8 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
 		tf->frame.callback = tbnet_tx_callback;
 		tf->frame.sof = TBIP_PDF_FRAME_START;
 		tf->frame.eof = TBIP_PDF_FRAME_END;
+
+		trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
 	}
 
 	ring->cons = 0;
@@ -832,12 +841,16 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
 
 		hdr = page_address(page);
 		if (!tbnet_check_frame(net, tf, hdr)) {
+			trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
+				hdr->frame_id, hdr->frame_index, hdr->frame_count);
 			__free_pages(page, TBNET_RX_PAGE_ORDER);
 			dev_kfree_skb_any(net->skb);
 			net->skb = NULL;
 			continue;
 		}
 
+		trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
+					hdr->frame_index, hdr->frame_count);
 		frame_size = le32_to_cpu(hdr->frame_size);
 
 		skb = net->skb;
@@ -871,6 +884,7 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
 
 		if (last) {
 			skb->protocol = eth_type_trans(skb, net->dev);
+			trace_tbnet_rx_skb(skb);
 			napi_gro_receive(&net->napi, skb);
 			net->skb = NULL;
 		}
@@ -990,6 +1004,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 		for (i = 0; i < frame_count; i++) {
 			hdr = page_address(frames[i]->page);
 			hdr->frame_count = cpu_to_le32(frame_count);
+			trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+						hdr->frame_index, hdr->frame_count);
 			dma_sync_single_for_device(dma_dev,
 				frames[i]->frame.buffer_phy,
 				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
@@ -1054,6 +1070,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 		len = le32_to_cpu(hdr->frame_size) - offset;
 		wsum = csum_partial(dest, len, wsum);
 		hdr->frame_count = cpu_to_le32(frame_count);
+		trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+					hdr->frame_index, hdr->frame_count);
 
 		offset = 0;
 	}
@@ -1096,6 +1114,8 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
 	bool unmap = false;
 	void *dest;
 
+	trace_tbnet_tx_skb(skb);
+
 	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
 	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
 		netif_stop_queue(net->dev);
@@ -1202,6 +1222,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
 	net->stats.tx_packets++;
 	net->stats.tx_bytes += skb->len;
 
+	trace_tbnet_consume_skb(skb);
 	dev_consume_skb_any(skb);
 
 	return NETDEV_TX_OK;
--- /dev/null
+++ b/drivers/net/thunderbolt/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoints for Thunderbolt/USB4 networking driver
+ *
+ * Copyright (C) 2023, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
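trace.c stays this small on purpose: CREATE_TRACE_POINTS must be defined in exactly one compilation unit before including trace.h, which is what emits the tracepoint bodies; together with CFLAGS_trace.o := -I$(src) in the Makefile and TRACE_INCLUDE_PATH . in the header, it lets <trace/define_trace.h> re-read trace.h from the driver directory. Every other file just includes the header for the declarations, roughly as in this sketch (the helper name is hypothetical, mirroring the tbnet_free_buffers() hunk above):

#include <linux/dma-direction.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#include "trace.h"	/* no CREATE_TRACE_POINTS here */

/* Hypothetical teardown helper: fires the same event as tbnet_free_buffers(). */
static void tbnet_example_free(unsigned int index, void *page, dma_addr_t phys)
{
	trace_tbnet_free_frame(index, page, phys, DMA_TO_DEVICE);
}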
--- /dev/null
+++ b/drivers/net/thunderbolt/trace.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoints for Thunderbolt/USB4 networking driver
+ *
+ * Copyright (C) 2023, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt_net
+
+#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_THUNDERBOLT_NET_H
+
+#include <linux/dma-direction.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+#define DMA_DATA_DIRECTION_NAMES			\
+	{ DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" },	\
+	{ DMA_TO_DEVICE, "DMA_TO_DEVICE" },		\
+	{ DMA_FROM_DEVICE, "DMA_FROM_DEVICE" },		\
+	{ DMA_NONE, "DMA_NONE" }
+
+DECLARE_EVENT_CLASS(tbnet_frame,
+	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+		 enum dma_data_direction dir),
+	TP_ARGS(index, page, phys, dir),
+	TP_STRUCT__entry(
+		__field(unsigned int, index)
+		__field(const void *, page)
+		__field(dma_addr_t, phys)
+		__field(enum dma_data_direction, dir)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->page = page;
+		__entry->phys = phys;
+		__entry->dir = dir;
+	),
+	TP_printk("index=%u page=%p phys=%pad dir=%s",
+		  __entry->index, __entry->page, &__entry->phys,
+		  __print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
+	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+		 enum dma_data_direction dir),
+	TP_ARGS(index, page, phys, dir)
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
+	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+		 enum dma_data_direction dir),
+	TP_ARGS(index, page, phys, dir)
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
+	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+		 enum dma_data_direction dir),
+	TP_ARGS(index, page, phys, dir)
+);
+
+DECLARE_EVENT_CLASS(tbnet_ip_frame,
+	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+	TP_ARGS(size, id, index, count),
+	TP_STRUCT__entry(
+		__field(u32, size)
+		__field(u16, id)
+		__field(u16, index)
+		__field(u32, count)
+	),
+	TP_fast_assign(
+		__entry->size = le32_to_cpu(size);
+		__entry->id = le16_to_cpu(id);
+		__entry->index = le16_to_cpu(index);
+		__entry->count = le32_to_cpu(count);
+	),
+	TP_printk("id=%u size=%u index=%u count=%u",
+		  __entry->id, __entry->size, __entry->index, __entry->count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
+	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+	TP_ARGS(size, id, index, count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
+	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+	TP_ARGS(size, id, index, count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
+	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+	TP_ARGS(size, id, index, count)
+);
+
+DECLARE_EVENT_CLASS(tbnet_skb,
+	TP_PROTO(const struct sk_buff *skb),
+	TP_ARGS(skb),
+	TP_STRUCT__entry(
+		__field(const void *, addr)
+		__field(unsigned int, len)
+		__field(unsigned int, data_len)
+		__field(unsigned int, nr_frags)
+	),
+	TP_fast_assign(
+		__entry->addr = skb;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
+	),
+	TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
+		  __entry->addr, __entry->len, __entry->data_len,
+		  __entry->nr_frags)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
+	TP_PROTO(const struct sk_buff *skb),
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
+	TP_PROTO(const struct sk_buff *skb),
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
+	TP_PROTO(const struct sk_buff *skb),
+	TP_ARGS(skb)
+);
+
+#endif /* _TRACE_THUNDERBOLT_NET_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
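Because the events are grouped into DECLARE_EVENT_CLASS/DEFINE_EVENT pairs, adding another frame-level event later would only take one more stanza in trace.h (before the #endif), reusing the existing class. A sketch of what that would look like; tbnet_map_frame is hypothetical and not part of this commit:

/* Hypothetical extra event reusing the tbnet_frame class defined above. */
DEFINE_EVENT(tbnet_frame, tbnet_map_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);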