From a163c5761019b94258ca655b27b46e82657fd6f5 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:07 +0100 Subject: [PATCH 01/14] can: m_can: Start/Cancel polling timer together with interrupts Interrupts are enabled/disabled in more places than just m_can_start() and m_can_stop(). Couple the polling timer with enabling/disabling of all interrupts to achieve equivalent behavior. Cc: Judith Mendez Fixes: b382380c0d2d ("can: m_can: Add hrtimer to generate software interrupt") Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-2-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 16ecc11c7f62..2395b1225cc8 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -418,6 +418,13 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) { + if (!cdev->net->irq) { + dev_dbg(cdev->dev, "Start hrtimer\n"); + hrtimer_start(&cdev->hrtimer, + ms_to_ktime(HRTIMER_POLL_INTERVAL_MS), + HRTIMER_MODE_REL_PINNED); + } + /* Only interrupt line 0 is used in this driver */ m_can_write(cdev, M_CAN_ILE, ILE_EINT0); } @@ -425,6 +432,11 @@ static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) { m_can_write(cdev, M_CAN_ILE, 0x0); + + if (!cdev->net->irq) { + dev_dbg(cdev->dev, "Stop hrtimer\n"); + hrtimer_cancel(&cdev->hrtimer); + } } /* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit @@ -1417,12 +1429,6 @@ static int m_can_start(struct net_device *dev) m_can_enable_all_interrupts(cdev); - if (!dev->irq) { - dev_dbg(cdev->dev, "Start hrtimer\n"); - hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS), - HRTIMER_MODE_REL_PINNED); - } - return 0; } @@ -1577,11 +1583,6 @@ static void m_can_stop(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); - if (!dev->irq) { - dev_dbg(cdev->dev, "Stop hrtimer\n"); - hrtimer_cancel(&cdev->hrtimer); - } - /* disable all interrupts */ m_can_disable_all_interrupts(cdev); From ba72f6c78b9beb3d85736b62b8167eaa93e0866b Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:08 +0100 Subject: [PATCH 02/14] can: m_can: Move hrtimer init to m_can_class_register The hrtimer_init() is called in m_can_plat_probe() and the hrtimer function is set in m_can_class_register(). For readability it is better to keep these two together in m_can_class_register(). 
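[Note on patches 01/02] Taken together, these two patches give the polling-timer lifecycle a single shape: the hrtimer is initialized and gets its callback in m_can_class_register(), and it is started and cancelled exclusively by the interrupt enable/disable helpers. Below is a minimal standalone userspace model of that invariant; the struct and function names are illustrative only, not driver code.

#include <assert.h>
#include <stdbool.h>

struct model {
	bool irq_available;	/* cdev->net->irq != 0 */
	bool interrupts_on;
	bool timer_running;
};

static void enable_all_interrupts(struct model *m)
{
	if (!m->irq_available)
		m->timer_running = true;	/* hrtimer_start() in the driver */
	m->interrupts_on = true;		/* write ILE_EINT0 */
}

static void disable_all_interrupts(struct model *m)
{
	m->interrupts_on = false;		/* write ILE = 0 */
	if (!m->irq_available)
		m->timer_running = false;	/* hrtimer_cancel() */
}

int main(void)
{
	struct model m = { .irq_available = false };	/* polling mode */

	enable_all_interrupts(&m);
	assert(m.timer_running && m.interrupts_on);
	disable_all_interrupts(&m);
	assert(!m.timer_running && !m.interrupts_on);
	return 0;
}

The assertions encode the property patch 01 restores: in polling mode the timer runs exactly when interrupts are enabled, no matter which code path toggles them.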
Cc: Judith Mendez Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-3-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 6 +++++- drivers/net/can/m_can/m_can_platform.c | 4 ---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2395b1225cc8..45391492339e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -2070,8 +2070,12 @@ int m_can_class_register(struct m_can_classdev *cdev) goto clk_disable; } - if (!cdev->net->irq) + if (!cdev->net->irq) { + dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer"); + hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); cdev->hrtimer.function = &hrtimer_callback; + } ret = m_can_dev_setup(cdev); if (ret) diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index cdb28d6a092c..ab1b8211a61c 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -109,10 +109,6 @@ static int m_can_plat_probe(struct platform_device *pdev) ret = irq; goto probe_fail; } - } else { - dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer"); - hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); } /* message ram could be shared */ From 4248ba9ea24fa2c8a2106bfb14f775035e9ea8aa Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:09 +0100 Subject: [PATCH 03/14] can: m_can: Write transmit header and data in one transaction Combine header and data before writing to the transmit fifo to reduce the overhead for peripheral chips. Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-4-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 45391492339e..a01c9261331d 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -320,6 +320,12 @@ struct id_and_dlc { u32 dlc; }; +struct m_can_fifo_element { + u32 id; + u32 dlc; + u8 data[CANFD_MAX_DLEN]; +}; + static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) { return cdev->ops->read_reg(cdev, reg); @@ -1637,9 +1643,10 @@ static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) { struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + u8 len_padded = DIV_ROUND_UP(cf->len, 4); + struct m_can_fifo_element fifo_element; struct net_device *dev = cdev->net; struct sk_buff *skb = cdev->tx_skb; - struct id_and_dlc fifo_header; u32 cccr, fdflags; u32 txfqs; int err; @@ -1650,27 +1657,27 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { - fifo_header.id = cf->can_id & CAN_EFF_MASK; - fifo_header.id |= TX_BUF_XTD; + fifo_element.id = cf->can_id & CAN_EFF_MASK; + fifo_element.id |= TX_BUF_XTD; } else { - fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18); + fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18); } if (cf->can_id & CAN_RTR_FLAG) - fifo_header.id |= TX_BUF_RTR; + fifo_element.id |= TX_BUF_RTR; if 
(cdev->version == 30) { netif_stop_queue(dev); - fifo_header.dlc = can_fd_len2dlc(cf->len) << 16; + fifo_element.dlc = can_fd_len2dlc(cf->len) << 16; /* Write the frame ID, DLC, and payload to the FIFO element. */ - err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2); + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2); if (err) goto out_fail; err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA, - cf->data, DIV_ROUND_UP(cf->len, 4)); + cf->data, len_padded); if (err) goto out_fail; @@ -1732,15 +1739,15 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) fdflags |= TX_BUF_BRS; } - fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) | + fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) | FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) | fdflags | TX_BUF_EFC; - err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2); - if (err) - goto out_fail; - err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA, - cf->data, DIV_ROUND_UP(cf->len, 4)); + memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data, + cf->len, 0); + + err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, + &fifo_element, 2 + len_padded); if (err) goto out_fail; From 07f25091ca0265da65ea7a4bd2409c627f529c6f Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:10 +0100 Subject: [PATCH 04/14] can: m_can: Implement receive coalescing m_can offers the possibility to set an interrupt on reaching a watermark level in the receive FIFO. This can be used to implement coalescing. Unfortunately there is no hardware timeout available to trigger an interrupt if only a few messages were received within a given time. To solve this I am using an hrtimer to wake up the irq thread after x microseconds. The timer is always started if receive coalescing is enabled and new received frames were available during an interrupt. The timer is stopped if no new data was available during interrupt handling. If the timer is started the new item interrupt is disabled and the watermark interrupt takes over. If the timer is not started again, the new item interrupt is enabled again, notifying the handler about every new item received.
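[Note] The mask/unmask decision is compact enough to model in isolation. The following is a runnable userspace sketch of the logic this patch puts into m_can_coalescing_update(); all names are illustrative stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

#define IR_RF0N (1u << 0)	/* new RX FIFO 0 element */
#define IR_RF0W (1u << 1)	/* RX FIFO 0 watermark reached */

struct model {
	unsigned int active_interrupts;
	unsigned int rx_coalesce_usecs_irq;
	bool timer_active;
};

static void coalescing_update(struct model *m, unsigned int ir)
{
	unsigned int ints = m->active_interrupts;
	bool start_timer = false;

	if (m->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
		start_timer = true;
		ints &= ~IR_RF0N;	/* watermark irq + timeout take over */
	} else if (!m->timer_active) {
		ints |= IR_RF0N;	/* back to per-frame interrupts */
	}

	m->active_interrupts = ints;
	if (start_timer)
		m->timer_active = true;	/* hrtimer_start() in the driver */
}

int main(void)
{
	struct model m = { .active_interrupts = IR_RF0N,
			   .rx_coalesce_usecs_irq = 500 };

	coalescing_update(&m, IR_RF0N);	/* frame arrived: mask RF0N */
	printf("RF0N masked: %d\n", !(m.active_interrupts & IR_RF0N));

	m.timer_active = false;		/* timeout fired, no new data */
	coalescing_update(&m, 0);
	printf("RF0N unmasked: %d\n", !!(m.active_interrupts & IR_RF0N));
	return 0;
}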
Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240207093220.2681425-5-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 77 ++++++++++++++++++++++++++++++++--- drivers/net/can/m_can/m_can.h | 5 +++ 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index a01c9261331d..b76fcf5f3889 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -422,6 +422,25 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) } } +static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts) +{ + if (cdev->active_interrupts == interrupts) + return; + cdev->ops->write_reg(cdev, M_CAN_IE, interrupts); + cdev->active_interrupts = interrupts; +} + +static void m_can_coalescing_disable(struct m_can_classdev *cdev) +{ + u32 new_interrupts = cdev->active_interrupts | IR_RF0N; + + if (!cdev->net->irq) + return; + + hrtimer_cancel(&cdev->hrtimer); + m_can_interrupt_enable(cdev, new_interrupts); +} + static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) { if (!cdev->net->irq) { @@ -437,7 +456,9 @@ static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) { + m_can_coalescing_disable(cdev); m_can_write(cdev, M_CAN_ILE, 0x0); + cdev->active_interrupts = 0x0; if (!cdev->net->irq) { dev_dbg(cdev->dev, "Stop hrtimer\n"); @@ -1091,15 +1112,42 @@ static int m_can_echo_tx_event(struct net_device *dev) return err; } +static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir) +{ + u32 new_interrupts = cdev->active_interrupts; + bool enable_timer = false; + + if (!cdev->net->irq) + return; + + if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) { + enable_timer = true; + new_interrupts &= ~IR_RF0N; + } else if (!hrtimer_active(&cdev->hrtimer)) { + new_interrupts |= IR_RF0N; + } + + m_can_interrupt_enable(cdev, new_interrupts); + if (enable_timer) { + hrtimer_start(&cdev->hrtimer, + ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC), + HRTIMER_MODE_REL); + } +} + static irqreturn_t m_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct m_can_classdev *cdev = netdev_priv(dev); u32 ir; - if (pm_runtime_suspended(cdev->dev)) + if (pm_runtime_suspended(cdev->dev)) { + m_can_coalescing_disable(cdev); return IRQ_NONE; + } + ir = m_can_read(cdev, M_CAN_IR); + m_can_coalescing_update(cdev, ir); if (!ir) return IRQ_NONE; @@ -1114,13 +1162,17 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) * - state change IRQ * - bus error IRQ and bus error reporting */ - if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { + if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) { cdev->irqstatus = ir; if (!cdev->is_peripheral) { m_can_disable_all_interrupts(cdev); napi_schedule(&cdev->napi); - } else if (m_can_rx_peripheral(dev, ir) < 0) { - goto out_fail; + } else { + int pkts; + + pkts = m_can_rx_peripheral(dev, ir); + if (pkts < 0) + goto out_fail; } } @@ -1156,6 +1208,15 @@ out_fail: return IRQ_HANDLED; } +static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer) +{ + struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer); + + irq_wake_thread(cdev->net->irq, cdev->net); + + return HRTIMER_NORESTART; +} + static const struct can_bittiming_const m_can_bittiming_const_30X = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time 
segment 1 = prop_seg + phase_seg1 */ @@ -1296,7 +1357,7 @@ static int m_can_chip_config(struct net_device *dev) /* Disable unused interrupts */ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE | IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | - IR_RF0F | IR_RF0W); + IR_RF0F); m_can_config_endisable(cdev, true); @@ -1340,6 +1401,7 @@ static int m_can_chip_config(struct net_device *dev) /* rx fifo configuration, blocking mode, fifo size 1 */ m_can_write(cdev, M_CAN_RXF0C, + FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) | FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) | cdev->mcfg[MRAM_RXF0].off); @@ -1398,7 +1460,7 @@ static int m_can_chip_config(struct net_device *dev) else interrupts &= ~(IR_ERR_LEC_31X); } - m_can_write(cdev, M_CAN_IE, interrupts); + m_can_interrupt_enable(cdev, interrupts); /* route all interrupts to INT0 */ m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0); @@ -2082,6 +2144,9 @@ int m_can_class_register(struct m_can_classdev *cdev) hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); cdev->hrtimer.function = &hrtimer_callback; + } else { + hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cdev->hrtimer.function = m_can_coalescing_timer; } ret = m_can_dev_setup(cdev); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 520e14277dff..b916206199f1 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -92,6 +92,11 @@ struct m_can_classdev { int pm_clock_support; int is_peripheral; + // Cached M_CAN_IE register content + u32 active_interrupts; + u32 rx_max_coalesced_frames_irq; + u32 rx_coalesce_usecs_irq; + struct mram_cfg mcfg[MRAM_CFG_NUM]; struct hrtimer hrtimer; From ec390d0876170c075fb5ada568db1e03dd790b5b Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:11 +0100 Subject: [PATCH 05/14] can: m_can: Implement transmit coalescing Extend the coalescing implementation for transmits. In normal mode the chip raises an interrupt for every finished transmit. This implementation switches to coalescing mode as soon as an interrupt handled a transmit. For coalescing the watermark level interrupt is used to interrupt exactly after x frames were sent. It switches back into normal mode once an interrupt occurs with no finished transmit while the timer is inactive. The timer is shared with receive coalescing. The timeouts for the receive and transmit coalescing timers have to be the same for that to work. The benefit is to have only a single running timer.
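[Note] Because one hrtimer backs both directions, only a single timeout value can be in effect. A small userspace sketch of how that shared timeout is picked (illustrative names, anticipating the irq_timer_wait selection added with the ethtool support later in this series):

#include <assert.h>
#include <stdio.h>

struct model {
	unsigned int rx_usecs;	/* rx_coalesce_usecs_irq */
	unsigned int tx_usecs;	/* tx_coalesce_usecs_irq */
};

/* One hrtimer serves rx and tx, so only one period can exist.
 * When both directions are enabled their values must be equal,
 * which the ethtool set_coalesce handler later enforces. */
static unsigned int timer_wait_usecs(const struct model *m)
{
	if (m->rx_usecs && m->tx_usecs)
		assert(m->rx_usecs == m->tx_usecs);
	return m->rx_usecs ? m->rx_usecs : m->tx_usecs;
}

int main(void)
{
	struct model both = { .rx_usecs = 500, .tx_usecs = 500 };
	struct model tx_only = { .tx_usecs = 250 };

	printf("both enabled: %u us\n", timer_wait_usecs(&both));
	printf("tx only:      %u us\n", timer_wait_usecs(&tx_only));
	return 0;
}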
Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-6-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 33 ++++++++++++++++++++------------- drivers/net/can/m_can/m_can.h | 4 ++++ 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index b76fcf5f3889..9b3e8e09f3aa 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -255,6 +255,7 @@ enum m_can_reg { #define TXESC_TBDS_64B 0x7 /* Tx Event FIFO Configuration (TXEFC) */ +#define TXEFC_EFWM_MASK GENMASK(29, 24) #define TXEFC_EFS_MASK GENMASK(21, 16) /* Tx Event FIFO Status (TXEFS) */ @@ -432,7 +433,7 @@ static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts) static void m_can_coalescing_disable(struct m_can_classdev *cdev) { - u32 new_interrupts = cdev->active_interrupts | IR_RF0N; + u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN; if (!cdev->net->irq) return; @@ -1115,24 +1116,29 @@ static int m_can_echo_tx_event(struct net_device *dev) static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir) { u32 new_interrupts = cdev->active_interrupts; - bool enable_timer = false; + bool enable_rx_timer = false; + bool enable_tx_timer = false; if (!cdev->net->irq) return; if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) { - enable_timer = true; + enable_rx_timer = true; new_interrupts &= ~IR_RF0N; - } else if (!hrtimer_active(&cdev->hrtimer)) { - new_interrupts |= IR_RF0N; } + if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) { + enable_tx_timer = true; + new_interrupts &= ~IR_TEFN; + } + if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer)) + new_interrupts |= IR_RF0N; + if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer)) + new_interrupts |= IR_TEFN; m_can_interrupt_enable(cdev, new_interrupts); - if (enable_timer) { - hrtimer_start(&cdev->hrtimer, - ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC), + if (enable_rx_timer | enable_tx_timer) + hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait, HRTIMER_MODE_REL); - } } static irqreturn_t m_can_isr(int irq, void *dev_id) @@ -1187,7 +1193,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) netif_wake_queue(dev); } } else { - if (ir & IR_TEFN) { + if (ir & (IR_TEFN | IR_TEFW)) { /* New TX FIFO Element arrived */ if (m_can_echo_tx_event(dev) != 0) goto out_fail; @@ -1355,9 +1361,8 @@ static int m_can_chip_config(struct net_device *dev) } /* Disable unused interrupts */ - interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE | - IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | - IR_RF0F); + interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF | + IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F); m_can_config_endisable(cdev, true); @@ -1394,6 +1399,8 @@ static int m_can_chip_config(struct net_device *dev) } else { /* Full TX Event FIFO is used */ m_can_write(cdev, M_CAN_TXEFC, + FIELD_PREP(TXEFC_EFWM_MASK, + cdev->tx_max_coalesced_frames_irq) | FIELD_PREP(TXEFC_EFS_MASK, cdev->mcfg[MRAM_TXE].num) | cdev->mcfg[MRAM_TXE].off); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index b916206199f1..1e461d305bce 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -84,6 +84,8 @@ struct m_can_classdev { struct sk_buff *tx_skb; struct phy *transceiver; + ktime_t irq_timer_wait; + struct m_can_ops *ops; int version; @@ 
-96,6 +98,8 @@ struct m_can_classdev { u32 active_interrupts; u32 rx_max_coalesced_frames_irq; u32 rx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + u32 tx_coalesce_usecs_irq; struct mram_cfg mcfg[MRAM_CFG_NUM]; From 9515223bd0bb9bb0a31b61a68f1049148c1d85ff Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:12 +0100 Subject: [PATCH 06/14] can: m_can: Add rx coalescing ethtool support Add the possibility to set coalescing parameters with ethtool. rx-frames-irq and rx-usecs-irq can only be set and unset together as the implemented mechanism would not work otherwise. rx-frames-irq can't be greater than the RX FIFO size. Also all values can only be changed if the chip is not active. Polling is excluded from irq coalescing support. Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-7-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 55 ++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 9b3e8e09f3aa..6dad1f569f82 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1977,7 +1977,57 @@ static const struct net_device_ops m_can_netdev_ops = { .ndo_change_mtu = can_change_mtu, }; +static int m_can_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ext_ack) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq; + ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq; + + return 0; +} + +static int m_can_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ext_ack) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + if (cdev->can.state != CAN_STATE_STOPPED) { + netdev_err(dev, "Device is in use, please shut it down first\n"); + return -EBUSY; + } + + if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) { + netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n", + ec->rx_max_coalesced_frames_irq, + cdev->mcfg[MRAM_RXF0].num); + return -EINVAL; + } + if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) { + netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n"); + return -EINVAL; + } + + cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + + return 0; +} + static const struct ethtool_ops m_can_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | + ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ, + .get_ts_info = ethtool_op_get_ts_info, + .get_coalesce = m_can_get_coalesce, + .set_coalesce = m_can_set_coalesce, +}; + +static const struct ethtool_ops m_can_ethtool_ops_polling = { .get_ts_info = ethtool_op_get_ts_info, }; @@ -1985,7 +2035,10 @@ static int register_m_can_dev(struct net_device *dev) { dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &m_can_netdev_ops; - dev->ethtool_ops = &m_can_ethtool_ops; + if (dev->irq) + dev->ethtool_ops = &m_can_ethtool_ops; + else + dev->ethtool_ops = &m_can_ethtool_ops_polling; return register_candev(dev); } From e55b963e4e94011e9de7a6448ea8e50a12bb9953 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:13 +0100 Subject: [PATCH 
07/14] can: m_can: Add tx coalescing ethtool support Add TX support to get/set functions for ethtool coalescing. tx-frames-irq and tx-usecs-irq can only be set/unset together. tx-frames-irq must not exceed the sizes of the TX event FIFO (TXE) and the TX FIFO (TXB). As rx and tx share the same timer, rx-usecs-irq and tx-usecs-irq can be enabled/disabled individually but they need to have the same value if enabled. Polling is excluded from TX irq coalescing. Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-8-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 38 ++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 6dad1f569f82..b31df3e3ceeb 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1986,6 +1986,8 @@ static int m_can_get_coalesce(struct net_device *dev, ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq; ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq; + ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq; + ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq; return 0; } @@ -2012,16 +2014,50 @@ static int m_can_set_coalesce(struct net_device *dev, netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n"); return -EINVAL; } + if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) { + netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n", + ec->tx_max_coalesced_frames_irq, + cdev->mcfg[MRAM_TXE].num); + return -EINVAL; + } + if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) { + netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n", + ec->tx_max_coalesced_frames_irq, + cdev->mcfg[MRAM_TXB].num); + return -EINVAL; + } + if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) { + netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n"); + return -EINVAL; + } + if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 && + ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) { + netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n", + ec->rx_coalesce_usecs_irq, + ec->tx_coalesce_usecs_irq); + return -EINVAL; + } cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + + if (cdev->rx_coalesce_usecs_irq) + cdev->irq_timer_wait = + ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC); + else + cdev->irq_timer_wait = + ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC); return 0; } static const struct ethtool_ops m_can_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | - ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ, + ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_TX_USECS_IRQ | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = m_can_get_coalesce, .set_coalesce = m_can_set_coalesce, From 14f0a0a4407ebb6f36eb7d394d497d4632c654dc Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:14 +0100 Subject: [PATCH 08/14] can: m_can: Use u32 for putidx putidx is not a plain integer; it is an unsigned field used in hardware registers. Use a u32 for it.
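[Note] A tiny standalone illustration of the point (userspace model; field_get() stands in for the kernel's FIELD_GET(), and the mask is assumed to mirror the driver's TXFQS_TFQPI_MASK, GENMASK(20, 16)): the put index is a bit field extracted from a register, so it can never be negative and an unsigned type matches its nature.

#include <stdint.h>
#include <stdio.h>

#define TXFQS_TFQPI_MASK  0x001f0000u	/* put index field, bits 20:16 */
#define TXFQS_TFQPI_SHIFT 16

static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;	/* what FIELD_GET() boils down to */
}

int main(void)
{
	uint32_t txfqs = 0x00030000;	/* sample TXFQS register value */
	uint32_t putidx = field_get(txfqs, TXFQS_TFQPI_MASK,
				    TXFQS_TFQPI_SHIFT);

	printf("putidx = %u\n", putidx);	/* prints 3 */
	return 0;
}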
Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-9-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index b31df3e3ceeb..1b62613f195c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -486,7 +486,7 @@ static void m_can_clean(struct net_device *net) struct m_can_classdev *cdev = netdev_priv(net); if (cdev->tx_skb) { - int putidx = 0; + u32 putidx = 0; net->stats.tx_errors++; if (cdev->version > 30) @@ -1695,12 +1695,12 @@ static int m_can_close(struct net_device *dev) return 0; } -static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) +static int m_can_next_echo_skb_occupied(struct net_device *dev, u32 putidx) { struct m_can_classdev *cdev = netdev_priv(dev); /*get wrap around for loopback skb index */ unsigned int wrap = cdev->can.echo_skb_max; - int next_idx; + u32 next_idx; /* calculate next index */ next_idx = (++putidx >= wrap ? 0 : putidx); @@ -1719,7 +1719,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) u32 cccr, fdflags; u32 txfqs; int err; - int putidx; + u32 putidx; cdev->tx_skb = NULL; From 80c5bac02a820dc569e38e102456ec4b34f6f607 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:15 +0100 Subject: [PATCH 09/14] can: m_can: Cache tx putidx m_can_tx_handler is the only place where data is written to the tx fifo. We can calculate the putidx in the driver code here to avoid the dependency on the txfqs register. Signed-off-by: Markus Schneider-Pargmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/all/20240207093220.2681425-10-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 8 +++++++- drivers/net/can/m_can/m_can.h | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 1b62613f195c..a8e7b910ef81 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1504,6 +1504,10 @@ static int m_can_start(struct net_device *dev) m_can_enable_all_interrupts(cdev); + if (cdev->version > 30) + cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK, + m_can_read(cdev, M_CAN_TXFQS)); + return 0; } @@ -1793,7 +1797,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) } /* get put index for frame */ - putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs); + putidx = cdev->tx_fifo_putidx; /* Construct DLC Field, with CAN-FD configuration. * Use the put index of the fifo as the message marker, @@ -1827,6 +1831,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) /* Enable TX FIFO element to start transfer */ m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); + cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ? 
+ 0 : cdev->tx_fifo_putidx); /* stop network queue if fifo full */ if (m_can_tx_fifo_full(cdev) || diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 1e461d305bce..0de42fc5ef1e 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -101,6 +101,9 @@ struct m_can_classdev { u32 tx_max_coalesced_frames_irq; u32 tx_coalesce_usecs_irq; + // Store this internally to avoid fetch delays on peripheral chips + int tx_fifo_putidx; + struct mram_cfg mcfg[MRAM_CFG_NUM]; struct hrtimer hrtimer; From e668673ed3992579f23413a13d7ed314cb62ee74 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:16 +0100 Subject: [PATCH 10/14] can: m_can: Use the workqueue as queue The current implementation uses the workqueue for peripheral chips to submit work. Only a single work item is queued and used at any time. To be able to keep more than one transmit in flight at a time, prepare the workqueue to support multiple transmits at the same time. Each work item now has separate storage for an skb and a pointer to cdev. This assures that each work item can be processed individually. The workqueue is replaced by an ordered workqueue which makes sure that only a single worker processes the items queued on the workqueue. Items are also processed in the order in which they were enqueued. This removes most of the concurrency the workqueue normally offers. It is not necessary for this driver. The cleanup functions have to be adapted a bit to handle this new mechanism. Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240207093220.2681425-11-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 109 ++++++++++++++++++++-------------- drivers/net/can/m_can/m_can.h | 14 ++++- 2 files changed, 76 insertions(+), 47 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index a8e7b910ef81..8d7dbf2eb46c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -485,17 +485,18 @@ static void m_can_clean(struct net_device *net) { struct m_can_classdev *cdev = netdev_priv(net); - if (cdev->tx_skb) { - u32 putidx = 0; + if (cdev->tx_ops) { + for (int i = 0; i != cdev->tx_fifo_size; ++i) { + if (!cdev->tx_ops[i].skb) + continue; - net->stats.tx_errors++; - if (cdev->version > 30) - putidx = FIELD_GET(TXFQS_TFQPI_MASK, - m_can_read(cdev, M_CAN_TXFQS)); - - can_free_echo_skb(cdev->net, putidx, NULL); - cdev->tx_skb = NULL; + net->stats.tx_errors++; + cdev->tx_ops[i].skb = NULL; + } } + + for (int i = 0; i != cdev->can.echo_skb_max; ++i) + can_free_echo_skb(cdev->net, i, NULL); } /* For peripherals, pass skb to rx-offload, which will push skb from @@ -1685,8 +1686,9 @@ static int m_can_close(struct net_device *dev) m_can_clk_stop(cdev); free_irq(dev->irq, dev); + m_can_clean(dev); + if (cdev->is_peripheral) { - cdev->tx_skb = NULL; destroy_workqueue(cdev->tx_wq); cdev->tx_wq = NULL; can_rx_offload_disable(&cdev->offload); @@ -1713,20 +1715,18 @@ static int m_can_next_echo_skb_occupied(struct net_device *dev, u32 putidx) return !!cdev->can.echo_skb[next_idx]; } -static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) +static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, + struct sk_buff *skb) { - struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; u8 len_padded = DIV_ROUND_UP(cf->len, 4); struct m_can_fifo_element fifo_element; struct net_device *dev =
cdev->net; - struct sk_buff *skb = cdev->tx_skb; u32 cccr, fdflags; u32 txfqs; int err; u32 putidx; - cdev->tx_skb = NULL; - /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { @@ -1850,10 +1850,31 @@ out_fail: static void m_can_tx_work_queue(struct work_struct *ws) { - struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev, - tx_work); + struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work); + struct m_can_classdev *cdev = op->cdev; + struct sk_buff *skb = op->skb; - m_can_tx_handler(cdev); + op->skb = NULL; + m_can_tx_handler(cdev, skb); +} + +static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb) +{ + cdev->tx_ops[cdev->next_tx_op].skb = skb; + queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work); + + ++cdev->next_tx_op; + if (cdev->next_tx_op >= cdev->tx_fifo_size) + cdev->next_tx_op = 0; +} + +static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev, + struct sk_buff *skb) +{ + netif_stop_queue(cdev->net); + m_can_tx_queue_skb(cdev, skb); + + return NETDEV_TX_OK; } static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, @@ -1864,30 +1885,15 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; - if (cdev->is_peripheral) { - if (cdev->tx_skb) { - netdev_err(dev, "hard_xmit called while tx busy\n"); - return NETDEV_TX_BUSY; - } - - if (cdev->can.state == CAN_STATE_BUS_OFF) { - m_can_clean(dev); - } else { - /* Need to stop the queue to avoid numerous requests - * from being sent. Suggested improvement is to create - * a queueing mechanism that will queue the skbs and - * process them in order. - */ - cdev->tx_skb = skb; - netif_stop_queue(cdev->net); - queue_work(cdev->tx_wq, &cdev->tx_work); - } - } else { - cdev->tx_skb = skb; - return m_can_tx_handler(cdev); + if (cdev->can.state == CAN_STATE_BUS_OFF) { + m_can_clean(cdev->net); + return NETDEV_TX_OK; } - return NETDEV_TX_OK; + if (cdev->is_peripheral) + return m_can_start_peripheral_xmit(cdev, skb); + else + return m_can_tx_handler(cdev, skb); } static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) @@ -1927,15 +1933,17 @@ static int m_can_open(struct net_device *dev) /* register interrupt handler */ if (cdev->is_peripheral) { - cdev->tx_skb = NULL; - cdev->tx_wq = alloc_workqueue("mcan_wq", - WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + cdev->tx_wq = alloc_ordered_workqueue("mcan_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM); if (!cdev->tx_wq) { err = -ENOMEM; goto out_wq_fail; } - INIT_WORK(&cdev->tx_work, m_can_tx_work_queue); + for (int i = 0; i != cdev->tx_fifo_size; ++i) { + cdev->tx_ops[i].cdev = cdev; + INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue); + } err = request_threaded_irq(dev->irq, NULL, m_can_isr, IRQF_ONESHOT, @@ -2228,6 +2236,19 @@ int m_can_class_register(struct m_can_classdev *cdev) { int ret; + cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num, + cdev->mcfg[MRAM_TXE].num)); + if (cdev->is_peripheral) { + cdev->tx_ops = + devm_kzalloc(cdev->dev, + cdev->tx_fifo_size * sizeof(*cdev->tx_ops), + GFP_KERNEL); + if (!cdev->tx_ops) { + dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n"); + return -ENOMEM; + } + } + if (cdev->pm_clock_support) { ret = m_can_clk_start(cdev); if (ret) diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 0de42fc5ef1e..be1d2119bd53 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -70,6 +70,12 
@@ struct m_can_ops { int (*init)(struct m_can_classdev *cdev); }; +struct m_can_tx_op { + struct m_can_classdev *cdev; + struct work_struct work; + struct sk_buff *skb; +}; + struct m_can_classdev { struct can_priv can; struct can_rx_offload offload; @@ -80,8 +86,6 @@ struct m_can_classdev { struct clk *cclk; struct workqueue_struct *tx_wq; - struct work_struct tx_work; - struct sk_buff *tx_skb; struct phy *transceiver; ktime_t irq_timer_wait; @@ -102,7 +106,11 @@ struct m_can_classdev { u32 tx_coalesce_usecs_irq; // Store this internally to avoid fetch delays on peripheral chips - int tx_fifo_putidx; + u32 tx_fifo_putidx; + + struct m_can_tx_op *tx_ops; + int tx_fifo_size; + int next_tx_op; struct mram_cfg mcfg[MRAM_CFG_NUM]; From 1fa80e23c15051edc1c594270517de3517ded798 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:17 +0100 Subject: [PATCH 11/14] can: m_can: Introduce a tx_fifo_in_flight counter Keep track of the number of transmits in flight. This patch prepares the driver to control the network interface queue based on this counter. By itself this counter could be implemented with an atomic, but as we need to do other things in the critical sections later I am using a spinlock instead. Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240207093220.2681425-12-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 30 ++++++++++++++++++++++++++++++ drivers/net/can/m_can/m_can.h | 4 ++++ 2 files changed, 34 insertions(+) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 8d7dbf2eb46c..2c68b1a60887 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -484,6 +484,7 @@ static void m_can_clean(struct net_device *net) { struct m_can_classdev *cdev = netdev_priv(net); + unsigned long irqflags; if (cdev->tx_ops) { for (int i = 0; i != cdev->tx_fifo_size; ++i) { @@ -497,6 +498,10 @@ static void m_can_clean(struct net_device *net) for (int i = 0; i != cdev->can.echo_skb_max; ++i) can_free_echo_skb(cdev->net, i, NULL); + + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + cdev->tx_fifo_in_flight = 0; + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); } /* For peripherals, pass skb to rx-offload, which will push skb from @@ -1067,6 +1072,24 @@ static void m_can_tx_update_stats(struct m_can_classdev *cdev, stats->tx_packets++; } +static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted) +{ + unsigned long irqflags; + + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + cdev->tx_fifo_in_flight -= transmitted; + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); +} + +static void m_can_start_tx(struct m_can_classdev *cdev) +{ + unsigned long irqflags; + + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + ++cdev->tx_fifo_in_flight; + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); +} + static int m_can_echo_tx_event(struct net_device *dev) { u32 txe_count = 0; @@ -1076,6 +1099,7 @@ static int m_can_echo_tx_event(struct net_device *dev) int i = 0; int err = 0; unsigned int msg_mark; + int processed = 0; struct m_can_classdev *cdev = netdev_priv(dev); @@ -1105,12 +1129,15 @@ static int m_can_echo_tx_event(struct net_device *dev) /* update stats */ m_can_tx_update_stats(cdev, msg_mark, timestamp); + ++processed; } if (ack_fgi != -1) m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK, ack_fgi)); +
m_can_finish_tx(cdev, processed); + return err; } @@ -1192,6 +1219,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) timestamp = m_can_get_timestamp(cdev); m_can_tx_update_stats(cdev, 0, timestamp); netif_wake_queue(dev); + m_can_finish_tx(cdev, 1); } } else { if (ir & (IR_TEFN | IR_TEFW)) { @@ -1890,6 +1918,8 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + m_can_start_tx(cdev); + if (cdev->is_peripheral) return m_can_start_peripheral_xmit(cdev, skb); else diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index be1d2119bd53..76b1ce1b7c1b 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -108,6 +108,10 @@ struct m_can_classdev { // Store this internally to avoid fetch delays on peripheral chips u32 tx_fifo_putidx; + /* Protects shared state between start_xmit and m_can_isr */ + spinlock_t tx_handling_spinlock; + int tx_fifo_in_flight; + struct m_can_tx_op *tx_ops; int tx_fifo_size; int next_tx_op; From 7508a10ca295c635fad9c9f34ac96384d806af89 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:18 +0100 Subject: [PATCH 12/14] can: m_can: Use tx_fifo_in_flight for netif_queue control The network queue is currently always stopped in start_xmit and continued in the interrupt handler. This is not possible anymore if we want to keep multiple transmits in flight in parallel. Use the previously introduced tx_fifo_in_flight counter to control the network queue instead. This has the benefit of not needing to ask the hardware about fifo status. This patch stops the network queue in start_xmit if the number of transmits in flight reaches the size of the fifo and wakes up the queue from the interrupt handler once the number of transmits in flight drops below the fifo size. This means any skbs over the limit will be rejected immediately in start_xmit (it shouldn't be possible at all to reach that state anyway). The maximum number of transmits in flight is the size of the fifo.
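[Note] The scheme is small enough to model in isolation. Below is a runnable userspace sketch (illustrative names, not driver code) of the start/finish pair this patch wires to queue control: start_xmit bumps the in-flight counter and stops the queue when the FIFO size is reached; a completion decrements it and wakes the queue.

#include <stdbool.h>
#include <stdio.h>

struct model {
	int in_flight;		/* tx_fifo_in_flight */
	int fifo_size;		/* tx_fifo_size */
	bool queue_stopped;
};

static bool start_tx(struct model *m)	/* false means NETDEV_TX_BUSY */
{
	int in_flight = m->in_flight + 1;

	if (in_flight >= m->fifo_size) {
		m->queue_stopped = true;	/* netif_stop_queue() */
		if (in_flight > m->fifo_size)
			return false;		/* should never happen */
	}
	m->in_flight = in_flight;
	return true;
}

static void finish_tx(struct model *m, int transmitted)
{
	if (m->in_flight >= m->fifo_size && transmitted > 0)
		m->queue_stopped = false;	/* netif_wake_queue() */
	m->in_flight -= transmitted;
}

int main(void)
{
	struct model m = { .fifo_size = 2 };

	start_tx(&m);
	start_tx(&m);			/* fills the FIFO, queue stops */
	printf("stopped: %d\n", m.queue_stopped);	/* 1 */
	finish_tx(&m, 1);		/* one completion wakes it again */
	printf("stopped: %d\n", m.queue_stopped);	/* 0 */
	return 0;
}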
Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240207093220.2681425-13-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 77 +++++++++-------------------------- 1 file changed, 20 insertions(+), 57 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2c68b1a60887..20595b7141af 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -379,16 +379,6 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val) return cdev->ops->read_fifo(cdev, addr_offset, val, 1); } -static inline bool _m_can_tx_fifo_full(u32 txfqs) -{ - return !!(txfqs & TXFQS_TFQF); -} - -static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev) -{ - return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS)); -} - static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) { u32 cccr = m_can_read(cdev, M_CAN_CCCR); @@ -1077,17 +1067,31 @@ static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted) unsigned long irqflags; spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0) + netif_wake_queue(cdev->net); cdev->tx_fifo_in_flight -= transmitted; spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); } -static void m_can_start_tx(struct m_can_classdev *cdev) +static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev) { unsigned long irqflags; + int tx_fifo_in_flight; spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); - ++cdev->tx_fifo_in_flight; + tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1; + if (tx_fifo_in_flight >= cdev->tx_fifo_size) { + netif_stop_queue(cdev->net); + if (tx_fifo_in_flight > cdev->tx_fifo_size) { + netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n"); + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); + return NETDEV_TX_BUSY; + } + } + cdev->tx_fifo_in_flight = tx_fifo_in_flight; spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); + + return NETDEV_TX_OK; } static int m_can_echo_tx_event(struct net_device *dev) @@ -1218,7 +1222,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); m_can_tx_update_stats(cdev, 0, timestamp); - netif_wake_queue(dev); m_can_finish_tx(cdev, 1); } } else { @@ -1226,10 +1229,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) /* New TX FIFO Element arrived */ if (m_can_echo_tx_event(dev) != 0) goto out_fail; - - if (netif_queue_stopped(dev) && - !m_can_tx_fifo_full(cdev)) - netif_wake_queue(dev); } } @@ -1729,20 +1728,6 @@ static int m_can_close(struct net_device *dev) return 0; } -static int m_can_next_echo_skb_occupied(struct net_device *dev, u32 putidx) -{ - struct m_can_classdev *cdev = netdev_priv(dev); - /*get wrap around for loopback skb index */ - unsigned int wrap = cdev->can.echo_skb_max; - u32 next_idx; - - /* calculate next index */ - next_idx = (++putidx >= wrap ? 
0 : putidx); - - /* check if occupied */ - return !!cdev->can.echo_skb[next_idx]; -} - static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, struct sk_buff *skb) { @@ -1751,7 +1736,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, struct m_can_fifo_element fifo_element; struct net_device *dev = cdev->net; u32 cccr, fdflags; - u32 txfqs; int err; u32 putidx; @@ -1806,24 +1790,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, } else { /* Transmit routine for version >= v3.1.x */ - txfqs = m_can_read(cdev, M_CAN_TXFQS); - - /* Check if FIFO full */ - if (_m_can_tx_fifo_full(txfqs)) { - /* This shouldn't happen */ - netif_stop_queue(dev); - netdev_warn(dev, - "TX queue active although FIFO is full."); - - if (cdev->is_peripheral) { - kfree_skb(skb); - dev->stats.tx_dropped++; - return NETDEV_TX_OK; - } else { - return NETDEV_TX_BUSY; - } - } - /* get put index for frame */ putidx = cdev->tx_fifo_putidx; @@ -1861,11 +1827,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ? 0 : cdev->tx_fifo_putidx); - - /* stop network queue if fifo full */ - if (m_can_tx_fifo_full(cdev) || - m_can_next_echo_skb_occupied(dev, putidx)) - netif_stop_queue(dev); } return NETDEV_TX_OK; @@ -1899,7 +1860,6 @@ static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb) static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev, struct sk_buff *skb) { - netif_stop_queue(cdev->net); m_can_tx_queue_skb(cdev, skb); return NETDEV_TX_OK; @@ -1909,6 +1869,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); + netdev_tx_t ret; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; @@ -1918,7 +1879,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - m_can_start_tx(cdev); + ret = m_can_start_tx(cdev); + if (ret != NETDEV_TX_OK) + return ret; if (cdev->is_peripheral) return m_can_start_peripheral_xmit(cdev, skb); From 251f913d19a8a960126359c20bd5719461e5399f Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:19 +0100 Subject: [PATCH 13/14] can: m_can: Implement BQL Implement byte queue limiting in preparation for the use of xmit_more(). Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240207093220.2681425-14-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 50 +++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 20595b7141af..48968da69ae9 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -489,6 +489,8 @@ static void m_can_clean(struct net_device *net) for (int i = 0; i != cdev->can.echo_skb_max; ++i) can_free_echo_skb(cdev->net, i, NULL); + netdev_reset_queue(cdev->net); + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); cdev->tx_fifo_in_flight = 0; spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); @@ -1043,29 +1045,34 @@ static int m_can_poll(struct napi_struct *napi, int quota) * echo. timestamp is used for peripherals to ensure correct ordering * by rx-offload, and is ignored for non-peripherals. 
*/ -static void m_can_tx_update_stats(struct m_can_classdev *cdev, - unsigned int msg_mark, - u32 timestamp) +static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev, + unsigned int msg_mark, u32 timestamp) { struct net_device *dev = cdev->net; struct net_device_stats *stats = &dev->stats; + unsigned int frame_len; if (cdev->is_peripheral) stats->tx_bytes += can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload, msg_mark, timestamp, - NULL); + &frame_len); else - stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); + stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len); stats->tx_packets++; + + return frame_len; } -static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted) +static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted, + unsigned int transmitted_frame_len) { unsigned long irqflags; + netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len); + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0) netif_wake_queue(cdev->net); @@ -1104,6 +1111,7 @@ static int m_can_echo_tx_event(struct net_device *dev) int err = 0; unsigned int msg_mark; int processed = 0; + unsigned int processed_frame_len = 0; struct m_can_classdev *cdev = netdev_priv(dev); @@ -1132,7 +1140,9 @@ static int m_can_echo_tx_event(struct net_device *dev) fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi); /* update stats */ - m_can_tx_update_stats(cdev, msg_mark, timestamp); + processed_frame_len += m_can_tx_update_stats(cdev, msg_mark, + timestamp); + ++processed; } @@ -1140,7 +1150,7 @@ static int m_can_echo_tx_event(struct net_device *dev) m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK, ack_fgi)); - m_can_finish_tx(cdev, processed); + m_can_finish_tx(cdev, processed, processed_frame_len); return err; } @@ -1218,11 +1228,12 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) if (ir & IR_TC) { /* Transmission Complete Interrupt*/ u32 timestamp = 0; + unsigned int frame_len; if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); - m_can_tx_update_stats(cdev, 0, timestamp); - m_can_finish_tx(cdev, 1); + frame_len = m_can_tx_update_stats(cdev, 0, timestamp); + m_can_finish_tx(cdev, 1, frame_len); } } else { if (ir & (IR_TEFN | IR_TEFW)) { @@ -1738,6 +1749,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, u32 cccr, fdflags; int err; u32 putidx; + unsigned int frame_len = can_skb_get_frame_len(skb); /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ @@ -1783,7 +1795,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, } m_can_write(cdev, M_CAN_TXBTIE, 0x1); - can_put_echo_skb(skb, dev, 0, 0); + can_put_echo_skb(skb, dev, 0, frame_len); m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ @@ -1821,7 +1833,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, /* Push loopback echo. 
* Will be looped back on TX interrupt based on message marker */ - can_put_echo_skb(skb, dev, putidx, 0); + can_put_echo_skb(skb, dev, putidx, frame_len); /* Enable TX FIFO element to start transfer */ m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); @@ -1869,11 +1881,14 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); + unsigned int frame_len; netdev_tx_t ret; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; + frame_len = can_skb_get_frame_len(skb); + if (cdev->can.state == CAN_STATE_BUS_OFF) { m_can_clean(cdev->net); return NETDEV_TX_OK; @@ -1883,10 +1898,17 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, if (ret != NETDEV_TX_OK) return ret; + netdev_sent_queue(dev, frame_len); + if (cdev->is_peripheral) - return m_can_start_peripheral_xmit(cdev, skb); + ret = m_can_start_peripheral_xmit(cdev, skb); else - return m_can_tx_handler(cdev, skb); + ret = m_can_tx_handler(cdev, skb); + + if (ret != NETDEV_TX_OK) + netdev_completed_queue(dev, 1, frame_len); + + return ret; } static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) From c306c3873de0804bea4bb2ac717bd2c088acc589 Mon Sep 17 00:00:00 2001 From: Markus Schneider-Pargmann Date: Wed, 7 Feb 2024 10:32:20 +0100 Subject: [PATCH 14/14] can: m_can: Implement transmit submission coalescing m_can supports submitting multiple transmits with one register write. This is an interesting option to reduce the number of SPI transfers for peripheral chips. The m_can_tx_op is extended with a bool that signals if it is the last transmission and the submit should be executed immediately. The worker then writes the skb to the FIFO and submits it only if the submit bool is set. If it isn't set, the worker will write the next skb which is waiting in the workqueue to the FIFO, etc. Signed-off-by: Markus Schneider-Pargmann Link: https://lore.kernel.org/all/20240207093220.2681425-15-msp@baylibre.com Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 56 ++++++++++++++++++++++++++++++++--- drivers/net/can/m_can/m_can.h | 6 ++++ 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 48968da69ae9..b7dbce4c342a 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1539,6 +1539,9 @@ static int m_can_start(struct net_device *dev) if (ret) return ret; + netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0), + cdev->tx_max_coalesced_frames); + cdev->can.state = CAN_STATE_ERROR_ACTIVE; m_can_enable_all_interrupts(cdev); @@ -1835,8 +1838,13 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, */ can_put_echo_skb(skb, dev, putidx, frame_len); - /* Enable TX FIFO element to start transfer */ - m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); + if (cdev->is_peripheral) { + /* Delay enabling TX FIFO element */ + cdev->tx_peripheral_submit |= BIT(putidx); + } else { + /* Enable TX FIFO element to start transfer */ + m_can_write(cdev, M_CAN_TXBAR, BIT(putidx)); + } cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ? 
0 : cdev->tx_fifo_putidx); } @@ -1849,6 +1857,17 @@ out_fail: return NETDEV_TX_BUSY; } +static void m_can_tx_submit(struct m_can_classdev *cdev) +{ + if (cdev->version == 30) + return; + if (!cdev->is_peripheral) + return; + + m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit); + cdev->tx_peripheral_submit = 0; +} + static void m_can_tx_work_queue(struct work_struct *ws) { struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work); @@ -1857,11 +1876,15 @@ static void m_can_tx_work_queue(struct work_struct *ws) op->skb = NULL; m_can_tx_handler(cdev, skb); + if (op->submit) + m_can_tx_submit(cdev); } -static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb) +static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb, + bool submit) { cdev->tx_ops[cdev->next_tx_op].skb = skb; + cdev->tx_ops[cdev->next_tx_op].submit = submit; queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work); ++cdev->next_tx_op; @@ -1872,7 +1895,17 @@ static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb) static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev, struct sk_buff *skb) { - m_can_tx_queue_skb(cdev, skb); + bool submit; + + ++cdev->nr_txs_without_submit; + if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames || + !netdev_xmit_more()) { + cdev->nr_txs_without_submit = 0; + submit = true; + } else { + submit = false; + } + m_can_tx_queue_skb(cdev, skb, submit); return NETDEV_TX_OK; } @@ -2015,6 +2048,7 @@ static int m_can_get_coalesce(struct net_device *dev, ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq; ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq; + ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames; ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq; ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq; @@ -2059,6 +2093,18 @@ static int m_can_set_coalesce(struct net_device *dev, netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n"); return -EINVAL; } + if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) { + netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n", + ec->tx_max_coalesced_frames, + cdev->mcfg[MRAM_TXE].num); + return -EINVAL; + } + if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) { + netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n", + ec->tx_max_coalesced_frames, + cdev->mcfg[MRAM_TXB].num); + return -EINVAL; + } if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 && ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) { netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n", @@ -2069,6 +2115,7 @@ static int m_can_set_coalesce(struct net_device *dev, cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames; cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; @@ -2086,6 +2133,7 @@ static const struct ethtool_ops m_can_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ | + ETHTOOL_COALESCE_TX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = m_can_get_coalesce, diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 
76b1ce1b7c1b..2986c4ce0b2f 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -74,6 +74,7 @@ struct m_can_tx_op { struct m_can_classdev *cdev; struct work_struct work; struct sk_buff *skb; + bool submit; }; struct m_can_classdev { @@ -102,6 +103,7 @@ struct m_can_classdev { u32 active_interrupts; u32 rx_max_coalesced_frames_irq; u32 rx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames; u32 tx_max_coalesced_frames_irq; u32 tx_coalesce_usecs_irq; @@ -116,6 +118,10 @@ struct m_can_classdev { int tx_fifo_size; int next_tx_op; + int nr_txs_without_submit; + /* bitfield of fifo elements that will be submitted together */ + u32 tx_peripheral_submit; + struct mram_cfg mcfg[MRAM_CFG_NUM]; struct hrtimer hrtimer;
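[Closing note on patch 14] The submission coalescing can be modelled compactly. The sketch below is a runnable userspace approximation (illustrative names, not driver code) of how tx_peripheral_submit collects one bit per queued FIFO element until either tx_max_coalesced_frames is reached or the stack reports xmit_more() == false, at which point a single TXBAR write starts the whole batch, saving SPI transfers on peripheral chips.

#include <stdbool.h>
#include <stdio.h>

struct model {
	unsigned int pending_bits;	/* tx_peripheral_submit */
	int nr_unsubmitted;		/* nr_txs_without_submit */
	int batch_limit;		/* tx_max_coalesced_frames */
	int txbar_writes;		/* bus/SPI transactions issued */
};

static void queue_frame(struct model *m, unsigned int putidx, bool xmit_more)
{
	m->pending_bits |= 1u << putidx;	/* mark this FIFO element */
	if (++m->nr_unsubmitted >= m->batch_limit || !xmit_more) {
		m->txbar_writes++;		/* one TXBAR write per batch */
		m->pending_bits = 0;
		m->nr_unsubmitted = 0;
	}
}

int main(void)
{
	struct model m = { .batch_limit = 4 };

	queue_frame(&m, 0, true);
	queue_frame(&m, 1, true);
	queue_frame(&m, 2, false);	/* stack has nothing more queued */
	printf("TXBAR writes for 3 frames: %d\n", m.txbar_writes);	/* 1 */
	return 0;
}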