Merge branch 'mlx4-better-big-tcp-support'
Eric Dumazet says:

====================
mlx4: better BIG-TCP support

mlx4 uses a bounce buffer in TX whenever the tx descriptors wrap
around the right edge of the ring. Size of this bounce buffer was
hard coded and can be increased if/when needed.
====================

Link: https://lore.kernel.org/r/20221207141237.2575012-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit ff36c447e2
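Some context on the mechanism being tuned here: when a descriptor would cross the right edge of the TX ring, mlx4 first builds it linearly in a bounce buffer and then copies it into the ring in two pieces. The sketch below is a simplified userspace illustration of that wrap-and-copy idea, not the driver's actual code; the layout and names (write_desc, ring_size, bounce) are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TXBB_SIZE 64	/* one TX building block, as in mlx4 */

/* Write a descriptor at 'index'; if it would cross the right edge of
 * the ring, stage it in 'bounce' and copy it back in two pieces. */
static void write_desc(uint8_t *ring, size_t ring_size, size_t index,
		       const uint8_t *desc, size_t desc_size, uint8_t *bounce)
{
	if (index + desc_size <= ring_size) {
		memcpy(ring + index, desc, desc_size);	/* fast path: contiguous */
		return;
	}
	memcpy(bounce, desc, desc_size);	/* build linearly first... */
	size_t tail = ring_size - index;	/* ...then split the copy */
	memcpy(ring + index, bounce, tail);
	memcpy(ring, bounce + tail, desc_size - tail);
}

int main(void)
{
	uint8_t ring[8 * TXBB_SIZE] = { 0 };
	uint8_t bounce[2 * TXBB_SIZE];
	uint8_t desc[2 * TXBB_SIZE];

	memset(desc, 0xab, sizeof(desc));
	/* A 2-TXBB descriptor starting in the last TXBB must wrap. */
	write_desc(ring, sizeof(ring), 7 * TXBB_SIZE, desc, sizeof(desc), bounce);
	printf("ring[0] = 0x%02x (wrapped tail landed at slot 0)\n", ring[0]);
	return 0;
}

This is also why the series can move the "Oversized header or SG list" check into the wrap branch in mlx4_en_xmit() (second en_tx.c hunk below): only the bounce path is limited by the bounce buffer's size, so the common contiguous path no longer pays for the test.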
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -65,7 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->sp_stride = stride;
-	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+	ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
@@ -77,9 +77,11 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 	       ring->tx_info, tmp);
 
-	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
+	ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
+					GFP_KERNEL, node);
 	if (!ring->bounce_buf) {
-		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+		ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
+					   GFP_KERNEL);
 		if (!ring->bounce_buf) {
 			err = -ENOMEM;
 			goto err_info;
@@ -909,11 +911,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Align descriptor to TXBB size */
 	desc_size = ALIGN(real_size, TXBB_SIZE);
 	nr_txbb = desc_size >> LOG_TXBB_SIZE;
-	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
-		if (netif_msg_tx_err(priv))
-			en_warn(priv, "Oversized header or SG list\n");
-		goto tx_drop_count;
-	}
 
 	bf_ok = ring->bf_enabled;
 	if (skb_vlan_tag_present(skb)) {
@@ -941,6 +938,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(index + nr_txbb <= ring->size))
 		tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
 	else {
+		if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
+			if (netif_msg_tx_err(priv))
+				en_warn(priv, "Oversized header or SG list\n");
+			goto tx_drop_count;
+		}
 		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
 		bounce = true;
 		bf_ok = false;
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -89,9 +89,19 @@
 #define MLX4_EN_FILTER_HASH_SHIFT 4
 #define MLX4_EN_FILTER_EXPIRY_QUOTA 60
 
-/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
-#define MAX_DESC_SIZE 512
-#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
+#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
+#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
+
+/* Maximal size of the bounce buffer:
+ * 256 bytes for LSO headers.
+ * CTRL_SIZE for control desc.
+ * DS_SIZE if skb->head contains some payload.
+ * MAX_SKB_FRAGS frags.
+ */
+#define MLX4_TX_BOUNCE_BUFFER_SIZE \
+	ALIGN(256 + CTRL_SIZE + DS_SIZE + MAX_SKB_FRAGS * DS_SIZE, TXBB_SIZE)
+
+#define MLX4_MAX_DESC_TXBBS (MLX4_TX_BOUNCE_BUFFER_SIZE / TXBB_SIZE)
 
 /*
  * OS related constants and tunables
@@ -217,9 +227,7 @@ struct mlx4_en_tx_info {
 
 
 #define MLX4_EN_BIT_DESC_OWN 0x80000000
-#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
 #define MLX4_EN_MEMTYPE_PAD 0x100
-#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
 
 
 struct mlx4_en_tx_desc {
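To get a feel for the new MLX4_TX_BOUNCE_BUFFER_SIZE formula, here is a quick back-of-the-envelope check. It assumes 16 bytes for both segment sizes (sizeof(struct mlx4_wqe_ctrl_seg) and sizeof(struct mlx4_wqe_data_seg)) and the 64-byte TXBB size used by mlx4; the second MAX_SKB_FRAGS value is a hypothetical larger setting of the kind BIG TCP benefits from.

#include <stdio.h>

/* Assumed values: 64-byte TXBBs, and 16 bytes each for
 * sizeof(struct mlx4_wqe_ctrl_seg) / sizeof(struct mlx4_wqe_data_seg). */
#define TXBB_SIZE 64
#define CTRL_SIZE 16
#define DS_SIZE   16
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

/* Mirror of the new MLX4_TX_BOUNCE_BUFFER_SIZE formula, with
 * MAX_SKB_FRAGS as a runtime parameter for illustration. */
static unsigned int bounce_size(unsigned int max_skb_frags)
{
	return ALIGN(256 + CTRL_SIZE + DS_SIZE + max_skb_frags * DS_SIZE,
		     TXBB_SIZE);
}

int main(void)
{
	unsigned int frags[] = { 17, 45 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int sz = bounce_size(frags[i]);

		printf("MAX_SKB_FRAGS=%2u -> %4u bytes (%2u TXBBs)\n",
		       frags[i], sz, sz / TXBB_SIZE);
	}
	return 0;
}

Under those assumptions the bounce buffer grows from the old hard-coded 512 bytes (8 TXBBs) to 576 bytes (9 TXBBs) with the historical MAX_SKB_FRAGS of 17, and would scale automatically to 1024 bytes (16 TXBBs) if MAX_SKB_FRAGS were ever raised to 45, which is the point of deriving the size from MAX_SKB_FRAGS instead of hard coding it.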