Merge branch 'ravb-ethernet-driver-bugfixes'
Paul Barker says:

====================
ravb Ethernet driver bugfixes

These patches fix bugs found during recent work on the ravb driver.

Patches 1 & 2 affect the R-Car code paths so have been tested on an
R-Car M3N Salvator-XS board - this is the only R-Car board I currently
have access to.

Patches 2, 3 & 4 affect the GbEth code paths so have been tested on
RZ/G2L and RZ/G2UL SMARC EVK boards.

Changes v2->v3:
  * Incorporate feedback from Niklas and add Reviewed-by tag to patch
    "net: ravb: Count packets instead of descriptors in R-Car RX path".

Changes v1->v2:
  * Fixed typos in commit message of patch
    "net: ravb: Allow RX loop to move past DMA mapping errors".
  * Added Sergey's Reviewed-by tags.
  * Expanded Cc list as Patchwork complained that I had missed people.
  * Trimmed the call trace in accordance with the docs [1] in patch
    "net: ravb: Fix GbEth jumbo packet RX checksum handling".

[1]: https://docs.kernel.org/process/submitting-patches.html#backtraces-in-commit-messages
====================

Link: https://lore.kernel.org/r/20240416120254.2620-1-paul.barker.ct@bp.renesas.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit d10a7f551e
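For context on the quoted patch title "net: ravb: Count packets instead of descriptors in R-Car RX path": the diff below charges the NAPI budget per completed packet rather than per descriptor walked, which matters because a single large frame (DT_FSTART/DT_FMID/DT_FEND) can occupy several descriptors. The standalone sketch below only illustrates that accounting difference under made-up names (demo_desc, demo_rx_poll); it is not the ravb driver code.

/* Toy model of RX budget accounting: charging the budget per descriptor
 * vs. per completed packet. All names here are hypothetical; this is not
 * the ravb driver code.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_die_dt { DT_FSINGLE, DT_FSTART, DT_FMID, DT_FEND, DT_FEMPTY };

struct demo_desc {
        enum demo_die_dt die_dt;        /* descriptor type */
};

/* Walk the ring and return how many packets complete within 'budget'.
 * If count_packets is false, every descriptor is charged against the
 * budget; if true, only completed packets are.
 */
static int demo_rx_poll(const struct demo_desc *ring, int ring_len,
                        int budget, bool count_packets)
{
        int packets = 0;

        for (int i = 0; i < ring_len; i++) {
                if (ring[i].die_dt == DT_FEMPTY)
                        break;

                if (!count_packets && --budget < 0)
                        break;

                /* A packet completes on a single-descriptor frame or on
                 * the last descriptor of a multi-descriptor frame.
                 */
                if (ring[i].die_dt == DT_FSINGLE || ring[i].die_dt == DT_FEND)
                        packets++;

                if (count_packets && packets == budget)
                        break;
        }

        return packets;
}

int main(void)
{
        /* One jumbo frame spread over four descriptors, then two
         * single-descriptor frames, then empty descriptors.
         */
        const struct demo_desc ring[] = {
                { DT_FSTART }, { DT_FMID }, { DT_FMID }, { DT_FEND },
                { DT_FSINGLE }, { DT_FSINGLE },
                { DT_FEMPTY }, { DT_FEMPTY },
        };
        const int n = sizeof(ring) / sizeof(ring[0]);

        printf("budget 4, per-descriptor accounting: %d packets\n",
               demo_rx_poll(ring, n, 4, false));
        printf("budget 4, per-packet accounting:     %d packets\n",
               demo_rx_poll(ring, n, 4, true));
        return 0;
}

With a budget of 4 on this sample ring, per-descriptor accounting completes only 1 packet, while per-packet accounting completes 3.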
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	dma_addr_t dma_addr;
 	int rx_packets = 0;
 	u8  desc_status;
-	u16 pkt_len;
+	u16 desc_len;
 	u8  die_dt;
 	int entry;
 	int limit;
 	int i;
 
-	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->rx_ring[q].desc[entry];
-	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q].desc[entry];
+		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+			break;
+
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		desc_status = desc->msc;
-		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+		desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
 		/* We use 0-byte descriptors to mark the DMA mapping errors */
-		if (!pkt_len)
+		if (!desc_len)
 			continue;
 
 		if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 		switch (die_dt) {
 		case DT_FSINGLE:
 			skb = ravb_get_skb_gbeth(ndev, entry, desc);
-			skb_put(skb, pkt_len);
+			skb_put(skb, desc_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			if (ndev->features & NETIF_F_RXCSUM)
 				ravb_rx_csum_gbeth(skb);
 			napi_gro_receive(&priv->napi[q], skb);
 			rx_packets++;
-			stats->rx_bytes += pkt_len;
+			stats->rx_bytes += desc_len;
 			break;
 		case DT_FSTART:
 			priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
-			skb_put(priv->rx_1st_skb, pkt_len);
+			skb_put(priv->rx_1st_skb, desc_len);
 			break;
 		case DT_FMID:
 			skb = ravb_get_skb_gbeth(ndev, entry, desc);
 			skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 						       priv->rx_1st_skb->len,
 						       skb->data,
-						       pkt_len);
-			skb_put(priv->rx_1st_skb, pkt_len);
+						       desc_len);
+			skb_put(priv->rx_1st_skb, desc_len);
 			dev_kfree_skb(skb);
 			break;
 		case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 			skb = ravb_get_skb_gbeth(ndev, entry, desc);
 			skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 						       priv->rx_1st_skb->len,
 						       skb->data,
-						       pkt_len);
-			skb_put(priv->rx_1st_skb, pkt_len);
+						       desc_len);
+			skb_put(priv->rx_1st_skb, desc_len);
 			dev_kfree_skb(skb);
 			priv->rx_1st_skb->protocol =
 				eth_type_trans(priv->rx_1st_skb, ndev);
 			if (ndev->features & NETIF_F_RXCSUM)
-				ravb_rx_csum_gbeth(skb);
+				ravb_rx_csum_gbeth(priv->rx_1st_skb);
+			stats->rx_bytes += priv->rx_1st_skb->len;
 			napi_gro_receive(&priv->napi[q],
 					 priv->rx_1st_skb);
 			rx_packets++;
-			stats->rx_bytes += pkt_len;
 			break;
 		}
-
-		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q].desc[entry];
-	}
+	}
 
 	/* Refill the RX ring buffers. */
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	const struct ravb_hw_info *info = priv->info;
-	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
-	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
-			priv->cur_rx[q];
 	struct net_device_stats *stats = &priv->stats[q];
 	struct ravb_ex_rx_desc *desc;
+	unsigned int limit, i;
 	struct sk_buff *skb;
 	dma_addr_t dma_addr;
 	struct timespec64 ts;
+	int rx_packets = 0;
 	u8  desc_status;
 	u16 pkt_len;
-	int limit;
+	int entry;
 
-	boguscnt = min(boguscnt, *quota);
-	limit = boguscnt;
-	desc = &priv->rx_ring[q].ex_desc[entry];
-	while (desc->die_dt != DT_FEMPTY) {
+	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q].ex_desc[entry];
+		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+			break;
+
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		desc_status = desc->msc;
 		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
-		if (--boguscnt < 0)
-			break;
-
 		/* We use 0-byte descriptors to mark the DMA mapping errors */
 		if (!pkt_len)
 			continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 			if (ndev->features & NETIF_F_RXCSUM)
 				ravb_rx_csum(skb);
 			napi_gro_receive(&priv->napi[q], skb);
-			stats->rx_packets++;
+			rx_packets++;
 			stats->rx_bytes += pkt_len;
 		}
-
-		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		desc->die_dt = DT_FEMPTY;
 	}
 
-	*quota -= limit - (++boguscnt);
-
-	return boguscnt <= 0;
+	stats->rx_packets += rx_packets;
+	*quota -= rx_packets;
+	return *quota == 0;
 }
 
 /* Packet receive function for Ethernet AVB */