net: thunderbolt: Add tracepoints

These are useful when debugging various performance issues.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Acked-by: Yehezkel Bernat <YehezkelShB@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit f758652703 (parent 7b3502c159)
Mika Westerberg, 2023-01-11 08:26:33 +02:00; committed by Jakub Kicinski
4 changed files with 176 additions and 1 deletion

drivers/net/thunderbolt/Makefile

@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB4_NET) := thunderbolt_net.o
thunderbolt_net-objs := main.o
thunderbolt_net-objs := main.o trace.o
# Tracepoints need to know where to find trace.h
CFLAGS_trace.o := -I$(src)
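
The extra include path matters because trace.h (added below) is not an ordinary header: its guard is deliberately defeated by TRACE_HEADER_MULTI_READ so the trace core can expand the same event definitions twice, once for declarations and once more when trace.c defines CREATE_TRACE_POINTS, and that second pass pulls the header back in by name through the compiler's include search path rather than relative to the including file. The underlying "one list, expanded twice" idea, reduced to a standalone userspace sketch with hypothetical names (illustration only, not how the kernel macros are spelled):

#include <stdio.h>

/*
 * Standalone illustration of the "same list expanded twice" trick the
 * tracepoint headers rely on. Hypothetical names throughout.
 */
#define EVENT_LIST \
	EVENT(rx_frame) \
	EVENT(tx_frame) \
	EVENT(free_frame)

/* First expansion: declare an id for every event. */
#define EVENT(name) EV_##name,
enum event_id { EVENT_LIST EV_MAX };
#undef EVENT

/* Second expansion: define the matching name strings. */
#define EVENT(name) #name,
static const char *event_names[] = { EVENT_LIST };
#undef EVENT

int main(void)
{
	for (int i = 0; i < EV_MAX; i++)
		printf("%d: %s\n", i, event_names[i]);
	return 0;
}

In the driver, the "list" is the set of DECLARE_EVENT_CLASS()/DEFINE_EVENT() blocks in trace.h, and -I$(src) is what lets the trace core's second pass locate that header.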

drivers/net/thunderbolt/main.c

@@ -23,6 +23,8 @@
#include <net/ip6_checksum.h>
#include "trace.h"
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY 4500
#define TBNET_LOGIN_TIMEOUT 500
@@ -353,6 +355,8 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
size = TBNET_RX_PAGE_SIZE;
}
trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
if (tf->frame.buffer_phy)
dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
dir);
@@ -526,6 +530,9 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
tf->frame.buffer_phy = dma_addr;
tf->dev = net->dev;
trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
DMA_FROM_DEVICE);
tb_ring_rx(ring->ring, &tf->frame);
ring->prod++;
@@ -602,6 +609,8 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
}
ring->cons = 0;
@@ -832,12 +841,16 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
hdr = page_address(page);
if (!tbnet_check_frame(net, tf, hdr)) {
trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
hdr->frame_id, hdr->frame_index, hdr->frame_count);
__free_pages(page, TBNET_RX_PAGE_ORDER);
dev_kfree_skb_any(net->skb);
net->skb = NULL;
continue;
}
trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
hdr->frame_index, hdr->frame_count);
frame_size = le32_to_cpu(hdr->frame_size);
skb = net->skb;
@@ -871,6 +884,7 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
if (last) {
skb->protocol = eth_type_trans(skb, net->dev);
trace_tbnet_rx_skb(skb);
napi_gro_receive(&net->napi, skb);
net->skb = NULL;
}
@@ -990,6 +1004,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
hdr->frame_index, hdr->frame_count);
dma_sync_single_for_device(dma_dev,
frames[i]->frame.buffer_phy,
tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
@@ -1054,6 +1070,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
len = le32_to_cpu(hdr->frame_size) - offset;
wsum = csum_partial(dest, len, wsum);
hdr->frame_count = cpu_to_le32(frame_count);
trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
hdr->frame_index, hdr->frame_count);
offset = 0;
}
@@ -1096,6 +1114,8 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
bool unmap = false;
void *dest;
trace_tbnet_tx_skb(skb);
nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
if (tbnet_available_buffers(&net->tx_ring) < nframes) {
netif_stop_queue(net->dev);
@@ -1202,6 +1222,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
trace_tbnet_consume_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;

drivers/net/thunderbolt/trace.c (new file)

@@ -0,0 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoints for Thunderbolt/USB4 networking driver
*
* Copyright (C) 2023, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"

drivers/net/thunderbolt/trace.h (new file)

@@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Tracepoints for Thunderbolt/USB4 networking driver
*
* Copyright (C) 2023, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thunderbolt_net
#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_THUNDERBOLT_NET_H
#include <linux/dma-direction.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>
#define DMA_DATA_DIRECTION_NAMES \
{ DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" }, \
{ DMA_TO_DEVICE, "DMA_TO_DEVICE" }, \
{ DMA_FROM_DEVICE, "DMA_FROM_DEVICE" }, \
{ DMA_NONE, "DMA_NONE" }
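
DMA_DATA_DIRECTION_NAMES feeds __print_symbolic() in the TP_printk() below: the recorded event keeps the raw enum dma_data_direction value, and the name lookup happens only when the event is formatted for output. A standalone userspace sketch of that value-to-name mapping (the numeric values mirror enum dma_data_direction; the helper names are made up for illustration):

#include <stdio.h>

/*
 * Userspace sketch of what __print_symbolic() does with the table
 * above: keep the numeric value in the recorded event, translate it
 * to a name only when printing.
 */
struct sym_entry { int value; const char *name; };

static const struct sym_entry dma_dir_names[] = {
	{ 0, "DMA_BIDIRECTIONAL" },
	{ 1, "DMA_TO_DEVICE" },
	{ 2, "DMA_FROM_DEVICE" },
	{ 3, "DMA_NONE" },
};

static const char *print_symbolic(int value)
{
	for (size_t i = 0; i < sizeof(dma_dir_names) / sizeof(dma_dir_names[0]); i++)
		if (dma_dir_names[i].value == value)
			return dma_dir_names[i].name;
	return "UNKNOWN";
}

int main(void)
{
	/* A recorded dir field of 2 is rendered as DMA_FROM_DEVICE. */
	printf("dir=%s\n", print_symbolic(2));
	return 0;
}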
DECLARE_EVENT_CLASS(tbnet_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir),
TP_STRUCT__entry(
__field(unsigned int, index)
__field(const void *, page)
__field(dma_addr_t, phys)
__field(enum dma_data_direction, dir)
),
TP_fast_assign(
__entry->index = index;
__entry->page = page;
__entry->phys = phys;
__entry->dir = dir;
),
TP_printk("index=%u page=%p phys=%pad dir=%s",
__entry->index, __entry->page, &__entry->phys,
__print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
);
DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);
DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);
DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);
DECLARE_EVENT_CLASS(tbnet_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count),
TP_STRUCT__entry(
__field(u32, size)
__field(u16, id)
__field(u16, index)
__field(u32, count)
),
TP_fast_assign(
__entry->size = le32_to_cpu(size);
__entry->id = le16_to_cpu(id);
__entry->index = le16_to_cpu(index);
__entry->count = le32_to_cpu(count);
),
TP_printk("id=%u size=%u index=%u count=%u",
__entry->id, __entry->size, __entry->index, __entry->count)
);
DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count)
);
DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count)
);
DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count)
);
DECLARE_EVENT_CLASS(tbnet_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__field(const void *, addr)
__field(unsigned int, len)
__field(unsigned int, data_len)
__field(unsigned int, nr_frags)
),
TP_fast_assign(
__entry->addr = skb;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
__entry->nr_frags = skb_shinfo(skb)->nr_frags;
),
TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
__entry->addr, __entry->len, __entry->data_len,
__entry->nr_frags)
);
DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
#endif /* __TRACE_THUNDERBOLT_NET_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
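
With the driver built this way, the new events are registered under the thunderbolt_net trace system and can be toggled per event or for the whole system through tracefs. A minimal userspace sketch that enables everything and streams the formatted output, assuming tracefs is mounted at /sys/kernel/tracing and the process has the required privileges (error handling kept minimal):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[512];
	FILE *tp;
	int fd;

	/* Enable every event in the thunderbolt_net trace system. */
	fd = open("/sys/kernel/tracing/events/thunderbolt_net/enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable thunderbolt_net events");
		return 1;
	}
	close(fd);

	/* trace_pipe blocks until events arrive and consumes them as read. */
	tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!tp) {
		perror("trace_pipe");
		return 1;
	}

	while (fgets(line, sizeof(line), tp))
		fputs(line, stdout);

	fclose(tp);
	return 0;
}

The same events can also be recorded with existing tooling such as trace-cmd or perf; the sketch is only meant to show where the events defined above end up.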