Merge branch 'ethernet-fixes-for-stmmac-driver'
Joakim Zhang says:

====================
ethernet: fixes for stmmac driver

Fixes for stmmac driver.
---
ChangeLogs:
V1->V2:
	* subject prefix: ethernet: stmmac: -> net: stmmac:
	* use dma_addr_t instead of unsigned int for physical address
	* use cpu_to_le32()
V2->V3:
	* fix the build issue pointed out by kbuild bot.
	* add error handling for stmmac_reinit_rx_buffers() function.
V3->V4:
	* remove patch (net: stmmac: remove redundant null check for ptp clock), reviewer thinks it should target net-next.
V4->V5:
	* use %pad format to print dma_addr_t.
	* extend dwmac4_display_ring() to support all descriptor types.
	* while() -> do-while()
====================

Link: https://lore.kernel.org/r/20210225090114.17562-1-qiangqing.zhang@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 7ae845d767
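The dma_addr_t/%pad items in the changelog come down to one idiom: derive a descriptor's bus address from the ring's DMA base address instead of calling virt_to_phys() on the CPU pointer, keep it in a dma_addr_t, and print it with %pad (which takes a pointer to the value, so it is correct on both 32-bit and 64-bit DMA configurations). A minimal sketch of that idiom, with an illustrative helper name that is not taken from the patch:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/*
	 * Print the bus address of descriptor 'idx' in a ring whose DMA base is
	 * 'ring_base' and whose descriptors are 'desc_size' bytes each.
	 */
	static void show_desc_addr(dma_addr_t ring_base, unsigned int idx,
				   unsigned int desc_size)
	{
		dma_addr_t dma_addr = ring_base + idx * desc_size;

		/* %pad expects a pointer to the dma_addr_t, not the value itself */
		pr_info("%03u [%pad]\n", idx, &dma_addr);
	}

The hunks below apply this pattern in dwmac4_display_ring(), enh_desc_display_ring(), ndesc_display_ring() and sysfs_display_ring(), and additionally pass the descriptor size so all descriptor types can be displayed.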
@@ -402,19 +402,53 @@ static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
 	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
 }
 
-static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
+				dma_addr_t dma_rx_phy, unsigned int desc_size)
 {
-	struct dma_desc *p = (struct dma_desc *)head;
+	dma_addr_t dma_addr;
 	int i;
 
 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
 
-	for (i = 0; i < size; i++) {
-		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
-			i, (unsigned int)virt_to_phys(p),
-			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
-			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
-		p++;
+	if (desc_size == sizeof(struct dma_desc)) {
+		struct dma_desc *p = (struct dma_desc *)head;
+
+		for (i = 0; i < size; i++) {
+			dma_addr = dma_rx_phy + i * sizeof(*p);
+			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+				i, &dma_addr,
+				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+			p++;
+		}
+	} else if (desc_size == sizeof(struct dma_extended_desc)) {
+		struct dma_extended_desc *extp = (struct dma_extended_desc *)head;
+
+		for (i = 0; i < size; i++) {
+			dma_addr = dma_rx_phy + i * sizeof(*extp);
+			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				i, &dma_addr,
+				le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
+				le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
+				le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
+				le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
+			extp++;
+		}
+	} else if (desc_size == sizeof(struct dma_edesc)) {
+		struct dma_edesc *ep = (struct dma_edesc *)head;
+
+		for (i = 0; i < size; i++) {
+			dma_addr = dma_rx_phy + i * sizeof(*ep);
+			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				i, &dma_addr,
+				le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
+				le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
+				le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
+				le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
+			ep++;
+		}
+	} else {
+		pr_err("unsupported descriptor!");
 	}
 }
 
@@ -499,10 +533,15 @@ static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
 	*len = le32_to_cpu(p->des2) & RDES2_HL;
 }
 
-static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid)
 {
 	p->des2 = cpu_to_le32(lower_32_bits(addr));
-	p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
+	p->des3 = cpu_to_le32(upper_32_bits(addr));
+
+	if (buf2_valid)
+		p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR);
+	else
+		p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR);
 }
 
 static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
@@ -53,10 +53,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
 
 	value &= ~DMA_CONTROL_ST;
 	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
-
-	value = readl(ioaddr + GMAC_CONFIG);
-	value &= ~GMAC_CONFIG_TE;
-	writel(value, ioaddr + GMAC_CONFIG);
 }
 
 void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
@@ -292,7 +292,7 @@ static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
 	*len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
 }
 
-static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid)
 {
 	p->des2 = cpu_to_le32(lower_32_bits(addr));
 	p->des3 = cpu_to_le32(upper_32_bits(addr));
@@ -417,19 +417,22 @@ static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
 	}
 }
 
-static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
+static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
+				  dma_addr_t dma_rx_phy, unsigned int desc_size)
 {
 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+	dma_addr_t dma_addr;
 	int i;
 
 	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
 
 	for (i = 0; i < size; i++) {
 		u64 x;
+		dma_addr = dma_rx_phy + i * sizeof(*ep);
 
 		x = *(u64 *)ep;
-		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
-			i, (unsigned int)virt_to_phys(ep),
+		pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+			i, &dma_addr,
 			(unsigned int)x, (unsigned int)(x >> 32),
 			ep->basic.des2, ep->basic.des3);
 		ep++;
@@ -78,7 +78,8 @@ struct stmmac_desc_ops {
 	/* get rx timestamp status */
 	int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
 	/* Display ring */
-	void (*display_ring)(void *head, unsigned int size, bool rx);
+	void (*display_ring)(void *head, unsigned int size, bool rx,
+			     dma_addr_t dma_rx_phy, unsigned int desc_size);
 	/* set MSS via context descriptor */
 	void (*set_mss)(struct dma_desc *p, unsigned int mss);
 	/* get descriptor skbuff address */
@@ -91,7 +92,7 @@ struct stmmac_desc_ops {
 	int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
 			   enum pkt_hash_types *type);
 	void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
-	void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
+	void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid);
 	void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
 	void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
 			     u32 inner_type);
@@ -269,19 +269,22 @@ static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
 		return 1;
 }
 
-static void ndesc_display_ring(void *head, unsigned int size, bool rx)
+static void ndesc_display_ring(void *head, unsigned int size, bool rx,
+			       dma_addr_t dma_rx_phy, unsigned int desc_size)
 {
 	struct dma_desc *p = (struct dma_desc *)head;
+	dma_addr_t dma_addr;
 	int i;
 
 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
 
 	for (i = 0; i < size; i++) {
 		u64 x;
+		dma_addr = dma_rx_phy + i * sizeof(*p);
 
 		x = *(u64 *)p;
-		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
-			i, (unsigned int)virt_to_phys(p),
+		pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x",
+			i, &dma_addr,
 			(unsigned int)x, (unsigned int)(x >> 32),
 			p->des2, p->des3);
 		p++;
@@ -1133,6 +1133,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 {
 	u32 rx_cnt = priv->plat->rx_queues_to_use;
+	unsigned int desc_size;
 	void *head_rx;
 	u32 queue;
 
@@ -1142,19 +1143,24 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 
 		pr_info("\tRX Queue %u rings\n", queue);
 
-		if (priv->extend_desc)
+		if (priv->extend_desc) {
 			head_rx = (void *)rx_q->dma_erx;
-		else
+			desc_size = sizeof(struct dma_extended_desc);
+		} else {
 			head_rx = (void *)rx_q->dma_rx;
+			desc_size = sizeof(struct dma_desc);
+		}
 
 		/* Display RX ring */
-		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
+		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+				    rx_q->dma_rx_phy, desc_size);
 	}
 }
 
 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 {
 	u32 tx_cnt = priv->plat->tx_queues_to_use;
+	unsigned int desc_size;
 	void *head_tx;
 	u32 queue;
 
@@ -1164,14 +1170,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 
 		pr_info("\tTX Queue %d rings\n", queue);
 
-		if (priv->extend_desc)
+		if (priv->extend_desc) {
 			head_tx = (void *)tx_q->dma_etx;
-		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			desc_size = sizeof(struct dma_extended_desc);
+		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
 			head_tx = (void *)tx_q->dma_entx;
-		else
+			desc_size = sizeof(struct dma_edesc);
+		} else {
 			head_tx = (void *)tx_q->dma_tx;
+			desc_size = sizeof(struct dma_desc);
+		}
 
-		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
+		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+				    tx_q->dma_tx_phy, desc_size);
 	}
 }
 
@@ -1303,9 +1314,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 			return -ENOMEM;
 
 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
+		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
 	} else {
 		buf->sec_page = NULL;
+		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
 	}
 
 	buf->addr = page_pool_get_dma_addr(buf->page);
@@ -1367,6 +1379,88 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 	}
 }
 
+/**
+ * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
+ * @priv: driver private structure
+ * Description: this function is called to re-allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
+static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	int i;
+
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		for (i = 0; i < priv->dma_rx_size; i++) {
+			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+			if (buf->page) {
+				page_pool_recycle_direct(rx_q->page_pool, buf->page);
+				buf->page = NULL;
+			}
+
+			if (priv->sph && buf->sec_page) {
+				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
+				buf->sec_page = NULL;
+			}
+		}
+	}
+
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		for (i = 0; i < priv->dma_rx_size; i++) {
+			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+			struct dma_desc *p;
+
+			if (priv->extend_desc)
+				p = &((rx_q->dma_erx + i)->basic);
+			else
+				p = rx_q->dma_rx + i;
+
+			if (!buf->page) {
+				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+				if (!buf->page)
+					goto err_reinit_rx_buffers;
+
+				buf->addr = page_pool_get_dma_addr(buf->page);
+			}
+
+			if (priv->sph && !buf->sec_page) {
+				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+				if (!buf->sec_page)
+					goto err_reinit_rx_buffers;
+
+				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+			}
+
+			stmmac_set_desc_addr(priv, p, buf->addr);
+			if (priv->sph)
+				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+			else
+				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+				stmmac_init_desc3(priv, p);
+		}
+	}
+
+	return;
+
+err_reinit_rx_buffers:
+	do {
+		while (--i >= 0)
+			stmmac_free_rx_buffer(priv, queue, i);
+
+		if (queue == 0)
+			break;
+
+		i = priv->dma_rx_size;
+	} while (queue-- > 0);
+}
+
 /**
  * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
@@ -3648,7 +3742,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 						DMA_FROM_DEVICE);
 
 		stmmac_set_desc_addr(priv, p, buf->addr);
-		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
+		if (priv->sph)
+			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+		else
+			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
 		stmmac_refill_desc3(priv, rx_q, p);
 
 		rx_q->rx_count_frames++;
@@ -3736,18 +3833,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 	unsigned int count = 0, error = 0, len = 0;
 	int status = 0, coe = priv->hw->rx_csum;
 	unsigned int next_entry = rx_q->cur_rx;
+	unsigned int desc_size;
 	struct sk_buff *skb = NULL;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
 
 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
-		if (priv->extend_desc)
+		if (priv->extend_desc) {
 			rx_head = (void *)rx_q->dma_erx;
-		else
+			desc_size = sizeof(struct dma_extended_desc);
+		} else {
 			rx_head = (void *)rx_q->dma_rx;
+			desc_size = sizeof(struct dma_desc);
+		}
 
-		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
+		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+				    rx_q->dma_rx_phy, desc_size);
 	}
 	while (count < limit) {
 		unsigned int buf1_len = 0, buf2_len = 0;
@@ -4315,24 +4417,27 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
 static struct dentry *stmmac_fs_dir;
 
 static void sysfs_display_ring(void *head, int size, int extend_desc,
-			       struct seq_file *seq)
+			       struct seq_file *seq, dma_addr_t dma_phy_addr)
 {
 	int i;
 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
 	struct dma_desc *p = (struct dma_desc *)head;
+	dma_addr_t dma_addr;
 
 	for (i = 0; i < size; i++) {
 		if (extend_desc) {
-			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
-				   i, (unsigned int)virt_to_phys(ep),
+			dma_addr = dma_phy_addr + i * sizeof(*ep);
+			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, &dma_addr,
 				   le32_to_cpu(ep->basic.des0),
 				   le32_to_cpu(ep->basic.des1),
 				   le32_to_cpu(ep->basic.des2),
 				   le32_to_cpu(ep->basic.des3));
 			ep++;
 		} else {
-			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
-				   i, (unsigned int)virt_to_phys(p),
+			dma_addr = dma_phy_addr + i * sizeof(*p);
+			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, &dma_addr,
 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
 			p++;
@@ -4360,11 +4465,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
 		if (priv->extend_desc) {
 			seq_printf(seq, "Extended descriptor ring:\n");
 			sysfs_display_ring((void *)rx_q->dma_erx,
-					   priv->dma_rx_size, 1, seq);
+					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
 		} else {
 			seq_printf(seq, "Descriptor ring:\n");
 			sysfs_display_ring((void *)rx_q->dma_rx,
-					   priv->dma_rx_size, 0, seq);
+					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
 		}
 	}
 
@@ -4376,11 +4481,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
 		if (priv->extend_desc) {
 			seq_printf(seq, "Extended descriptor ring:\n");
 			sysfs_display_ring((void *)tx_q->dma_etx,
-					   priv->dma_tx_size, 1, seq);
+					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
 			seq_printf(seq, "Descriptor ring:\n");
 			sysfs_display_ring((void *)tx_q->dma_tx,
-					   priv->dma_tx_size, 0, seq);
+					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
 		}
 	}
 
@@ -5257,6 +5362,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
 		tx_q->cur_tx = 0;
 		tx_q->dirty_tx = 0;
 		tx_q->mss = 0;
+
+		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
 }
 
@@ -5318,7 +5425,7 @@ int stmmac_resume(struct device *dev)
 	mutex_lock(&priv->lock);
 
 	stmmac_reset_queues_param(priv);
-
+	stmmac_reinit_rx_buffers(priv);
 	stmmac_free_tx_skbufs(priv);
 	stmmac_clear_descriptors(priv);
 