gve: Remove dependency on 4k page size.

Prior to this change, gve crashes when attempting to run in kernels with
page sizes other than 4k. This change removes unnecessary references to
PAGE_SIZE and replaces them with more meaningful constants.

Signed-off-by: Jordan Kimbrough <jrkim@google.com>
Signed-off-by: John Fraker <jfraker@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20231128002648.320892-6-jfraker@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit da7d4b42ca (parent 513072fb4b), authored by John Fraker on 2023-11-27 16:26:48 -08:00, committed by Jakub Kicinski
5 changed files with 11 additions and 10 deletions
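To illustrate the commit message with a minimal userspace sketch (not part of the patch; the constant name mirrors the define added below): PAGE_SIZE / 2 only equals the 2048-byte receive buffer the driver actually wants on 4k-page kernels, so the expression silently changes meaning on 16k- or 64k-page configurations.

#include <stdio.h>

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

int main(void)
{
	/* 4k pages are typical on x86-64; arm64/powerpc kernels may use 16k or 64k. */
	unsigned long page_sizes[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned long half = page_sizes[i] / 2;

		printf("PAGE_SIZE %6lu: PAGE_SIZE/2 = %5lu vs. buffer size %d %s\n",
		       page_sizes[i], half, GVE_DEFAULT_RX_BUFFER_SIZE,
		       half == GVE_DEFAULT_RX_BUFFER_SIZE ? "(matches)" : "(mismatch)");
	}
	return 0;
}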

drivers/net/ethernet/google/gve/gve.h

@@ -49,7 +49,9 @@
 /* PTYPEs are always 10 bits. */
 #define GVE_NUM_PTYPES 1024
 
-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
 #define GVE_XDP_ACTIONS 5

drivers/net/ethernet/google/gve/gve_ethtool.c

@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
 	case ETHTOOL_RX_COPYBREAK:
 	{
 		u32 max_copybreak = gve_is_gqi(priv) ?
-			(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
 		len = *(u32 *)value;
 		if (len > max_copybreak)

drivers/net/ethernet/google/gve/gve_main.c

@@ -1328,7 +1328,7 @@ static int gve_open(struct net_device *dev)
 		/* Hard code this for now. This may be tuned in the future for
 		 * performance.
 		 */
-		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+		priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
 	}
 	err = gve_create_rings(priv);
 	if (err)
@@ -1664,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
 		return -EOPNOTSUPP;
 	}
-	if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
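For reference, the bound this check enforces, worked out as a standalone sketch (GVE_RX_PAD's value is assumed to be 2 here; the real define lives in the driver and is not shown in this patch): with a fixed 2048-byte buffer, the largest MTU XDP can accept is 2048 minus the Ethernet header and the driver's RX padding, regardless of the kernel's page size.

#include <stdio.h>
#include <linux/if_ether.h>	/* struct ethhdr, 14 bytes */

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_RX_PAD 2	/* assumed value, defined by the driver */

int main(void)
{
	unsigned long max_xdp_mtu = GVE_DEFAULT_RX_BUFFER_SIZE -
				    sizeof(struct ethhdr) - GVE_RX_PAD;

	/* With the assumed values this prints 2032. */
	printf("largest MTU accepted with XDP enabled: %lu\n", max_xdp_mtu);
	return 0;
}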

drivers/net/ethernet/google/gve/gve_rx.c

@@ -283,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 	/* Allocating half-page buffers allows page-flipping which is faster
 	 * than copying or allocating new pages.
 	 */
-	rx->packet_buffer_size = PAGE_SIZE / 2;
+	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
 	gve_rx_add_to_block(priv, idx);
@@ -399,10 +399,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
 static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
 {
-	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+	const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);
 	/* "flip" to other packet buffer on this page */
-	page_info->page_offset ^= PAGE_SIZE / 2;
+	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
 	*(slot_addr) ^= offset;
 }
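The flip in this hunk is an XOR toggle: offsets bounce between 0 and GVE_DEFAULT_RX_BUFFER_OFFSET, i.e. the two 2048-byte packet buffers that share a 4k region, without ever consulting PAGE_SIZE. A simplified userspace sketch of the same idea (plain integers in place of the driver's __be64 slot addresses):

#include <stdio.h>

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

int main(void)
{
	unsigned int page_offset = 0;

	/* Each flip selects the other packet buffer in the same 4k region. */
	for (int i = 0; i < 4; i++) {
		printf("use buffer at offset %u\n", page_offset);
		page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;	/* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}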
@@ -507,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 			return NULL;
 		gve_dec_pagecnt_bias(copy_page_info);
-		copy_page_info->page_offset += rx->packet_buffer_size;
-		copy_page_info->page_offset &= (PAGE_SIZE - 1);
+		copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
 		if (copy_page_info->can_flip) {
 			/* We have used both halves of this copy page, it
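Why one XOR can stand in for the removed add-and-mask pair (my reading, not spelled out in the patch): when the copy-pool offset only ever takes the values 0 and 2048, adding 2048 and masking with PAGE_SIZE - 1 walks the same sequence as XOR-ing with 2048 on a 4k-page kernel, but only the XOR stays correct when PAGE_SIZE is larger. A quick standalone check of that equivalence:

#include <assert.h>
#include <stdio.h>

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

int main(void)
{
	unsigned int masked = 0;	/* old form, assumes 4k pages */
	unsigned int xored = 0;		/* new form, page-size independent */

	for (int i = 0; i < 8; i++) {
		masked = (masked + 2048) & (4096 - 1);
		xored ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
		assert(masked == xored);	/* both alternate 2048, 0, 2048, ... */
	}
	printf("add-and-mask and xor agree when PAGE_SIZE is 4096\n");
	return 0;
}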

drivers/net/ethernet/google/gve/gve_tx.c

@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return 0;
 }
-#define GVE_TX_START_THRESH	PAGE_SIZE
+#define GVE_TX_START_THRESH	4096
 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
 			     u32 to_do, bool try_to_wake)