// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
//	Dong Aisheng <b29396@freescale.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

/* Bosch M_CAN user manual can be obtained from:
 * https://github.com/linux-can/can-doc/tree/master/m_can
 */

#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/ethtool.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "m_can.h"

/* registers definition */
enum m_can_reg {
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_DBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_NBTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	/* TDCR Register only available for version >=3.1.x */
	M_CAN_TDCR	= 0x48,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};

/* message ram configuration data length: the "bosch,mram-cfg" devicetree
 * property carries the MRAM offset followed by seven element counts
 */
#define MRAM_CFG_LEN 8

/* Core Release Register (CREL) */
#define CREL_REL_MASK		GENMASK(31, 28)
#define CREL_STEP_MASK		GENMASK(27, 24)
#define CREL_SUBSTEP_MASK	GENMASK(23, 20)

/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_MASK		GENMASK(20, 16)
#define DBTP_DTSEG1_MASK	GENMASK(12, 8)
#define DBTP_DTSEG2_MASK	GENMASK(7, 4)
#define DBTP_DSJW_MASK		GENMASK(3, 0)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_MASK		GENMASK(14, 8)
#define TDCR_TDCF_MASK		GENMASK(6, 0)

/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_DAR		BIT(6)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
/* for version 3.0.x */
#define CCCR_CMR_MASK		GENMASK(11, 10)
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		GENMASK(9, 8)
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
/* for version >=3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* for version >=3.2.x */
#define CCCR_NISO		BIT(15)
/* for version >=3.3.x */
#define CCCR_WMM		BIT(11)
#define CCCR_UTSU		BIT(10)

/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_MASK		GENMASK(31, 25)
#define NBTP_NBRP_MASK		GENMASK(24, 16)
#define NBTP_NTSEG1_MASK	GENMASK(15, 8)
#define NBTP_NTSEG2_MASK	GENMASK(6, 0)

/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK		GENMASK(19, 16)
#define TSCC_TSS_MASK		GENMASK(1, 0)
#define TSCC_TSS_DISABLE	0x0
#define TSCC_TSS_INTERNAL	0x1
#define TSCC_TSS_EXTERNAL	0x2

/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK		GENMASK(15, 0)

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_MASK		GENMASK(14, 8)
#define ECR_TEC_MASK		GENMASK(7, 0)

/* Protocol Status Register (PSR) */
#define PSR_BO			BIT(7)
#define PSR_EW			BIT(6)
#define PSR_EP			BIT(5)
#define PSR_LEC_MASK		GENMASK(2, 0)
#define PSR_DLEC_MASK		GENMASK(10, 8)

/* Interrupt Register (IR) */
#define IR_ALL_INT	0xffffffff

/* Renamed bits for versions > 3.1.x */
#define IR_ARA		BIT(29)
#define IR_PED		BIT(28)
#define IR_PEA		BIT(27)

/* Bits for version 3.0.x */
#define IR_STE		BIT(31)
#define IR_FOE		BIT(30)
#define IR_ACKE		BIT(29)
#define IR_BE		BIT(28)
#define IR_CRCE		BIT(27)
#define IR_WDI		BIT(26)
#define IR_BO		BIT(25)
#define IR_EW		BIT(24)
#define IR_EP		BIT(23)
#define IR_ELO		BIT(22)
#define IR_BEU		BIT(21)
#define IR_BEC		BIT(20)
#define IR_DRX		BIT(19)
#define IR_TOO		BIT(18)
#define IR_MRAF		BIT(17)
#define IR_TSW		BIT(16)
#define IR_TEFL		BIT(15)
#define IR_TEFF		BIT(14)
#define IR_TEFW		BIT(13)
#define IR_TEFN		BIT(12)
#define IR_TFE		BIT(11)
#define IR_TCF		BIT(10)
#define IR_TC		BIT(9)
#define IR_HPM		BIT(8)
#define IR_RF1L		BIT(7)
#define IR_RF1F		BIT(6)
#define IR_RF1W		BIT(5)
#define IR_RF1N		BIT(4)
#define IR_RF0L		BIT(3)
#define IR_RF0F		BIT(2)
#define IR_RF0W		BIT(1)
#define IR_RF0N		BIT(0)
#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
			 IR_RF0L)
#define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)

/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X	(IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
			 IR_RF0L)
#define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)

/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0	0x0
#define ILS_ALL_INT1	0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1	BIT(1)
#define ILE_EINT0	BIT(0)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_MASK	GENMASK(30, 24)
#define RXFC_FS_MASK	GENMASK(22, 16)

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL	BIT(25)
#define RXFS_FF		BIT(24)
#define RXFS_FPI_MASK	GENMASK(21, 16)
#define RXFS_FGI_MASK	GENMASK(13, 8)
#define RXFS_FFL_MASK	GENMASK(6, 0)

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define RXESC_RBDS_MASK		GENMASK(10, 8)
#define RXESC_F1DS_MASK		GENMASK(6, 4)
#define RXESC_F0DS_MASK		GENMASK(2, 0)
#define RXESC_64B		0x7

/* Tx Buffer Configuration (TXBC) */
#define TXBC_TFQS_MASK		GENMASK(29, 24)
#define TXBC_NDTB_MASK		GENMASK(21, 16)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_MASK	GENMASK(20, 16)
#define TXFQS_TFGI_MASK		GENMASK(12, 8)
#define TXFQS_TFFL_MASK		GENMASK(5, 0)

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_MASK		GENMASK(2, 0)
#define TXESC_TBDS_64B		0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_MASK		GENMASK(21, 16)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_MASK		GENMASK(12, 8)
#define TXEFS_EFFL_MASK		GENMASK(5, 0)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_MASK		GENMASK(4, 0)

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72

/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA		0x8

/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI		BIT(31)
#define RX_BUF_XTD		BIT(30)
#define RX_BUF_RTR		BIT(29)
/* R1 */
#define RX_BUF_ANMF		BIT(31)
#define RX_BUF_FDF		BIT(21)
#define RX_BUF_BRS		BIT(20)
#define RX_BUF_RXTS_MASK	GENMASK(15, 0)

/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI		BIT(31)
#define TX_BUF_XTD		BIT(30)
#define TX_BUF_RTR		BIT(29)
/* T1 */
#define TX_BUF_EFC		BIT(23)
#define TX_BUF_FDF		BIT(21)
#define TX_BUF_BRS		BIT(20)
#define TX_BUF_MM_MASK		GENMASK(31, 24)
#define TX_BUF_DLC_MASK		GENMASK(19, 16)

/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_MASK	GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK	GENMASK(15, 0)

/* Hrtimer polling interval */
#define HRTIMER_POLL_INTERVAL_MS	1
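
/* When the device has no interrupt line, m_can_start() arms an hrtimer at
 * this interval to poll the interrupt status instead, and m_can_stop()
 * cancels it again.
 */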

/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
 * and we can save a (potentially slow) bus round trip by combining
 * reads and writes to them.
 */
struct id_and_dlc {
	u32 id;
	u32 dlc;
};
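
/* A single m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2) call
 * (see m_can_read_fifo() below) therefore fetches both words at once, so
 * peripheral (e.g. SPI-attached) controllers pay for one bus transaction
 * instead of two.
 */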

static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
	return cdev->ops->read_reg(cdev, reg);
}

static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
			       u32 val)
{
	cdev->ops->write_reg(cdev, reg, val);
}

static int
m_can_fifo_read(struct m_can_classdev *cdev,
		u32 fgi, unsigned int offset, void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}

static int
m_can_fifo_write(struct m_can_classdev *cdev,
		 u32 fpi, unsigned int offset, const void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}

static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
					  u32 fpi, u32 val)
{
	return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}

static int
m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
		offset;

	return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}

static inline bool _m_can_tx_fifo_full(u32 txfqs)
{
	return !!(txfqs & TXFQS_TFQF);
}

static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
	return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
}

static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
	u32 cccr = m_can_read(cdev, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	/* Clear the Clock stop request if it was set */
	if (cccr & CCCR_CSR)
		cccr &= ~CCCR_CSR;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(cdev->net, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}

static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_write(cdev, M_CAN_ILE, 0x0);
}

/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
 * width.
 */
static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
{
	u32 tscv;
	u32 tsc;

	tscv = m_can_read(cdev, M_CAN_TSCV);
	tsc = FIELD_GET(TSCV_TSC_MASK, tscv);

	return (tsc << 16);
}
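
/* Example: a raw 16-bit TSC value of 0x1234 is returned as 0x12340000.
 * Keeping the counter in the upper half of the 32-bit value lets the signed
 * 32-bit timestamp comparison used by rx-offload stay consistent across
 * wrap-around of the hardware counter.
 */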

static void m_can_clean(struct net_device *net)
{
	struct m_can_classdev *cdev = netdev_priv(net);

	if (cdev->tx_skb) {
		int putidx = 0;

		net->stats.tx_errors++;
		if (cdev->version > 30)
			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
					   m_can_read(cdev, M_CAN_TXFQS));

		can_free_echo_skb(cdev->net, putidx, NULL);
		cdev->tx_skb = NULL;
	}
}

/* For peripherals, pass skb to rx-offload, which will push skb from
 * napi. For non-peripherals, RX is done in napi already, so push
 * directly. timestamp is used to ensure good skb ordering in
 * rx-offload and is ignored for non-peripherals.
 */
static void m_can_receive_skb(struct m_can_classdev *cdev,
			      struct sk_buff *skb,
			      u32 timestamp)
{
	if (cdev->is_peripheral) {
		struct net_device_stats *stats = &cdev->net->stats;
		int err;

		err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
						     timestamp);
		if (err)
			stats->rx_fifo_errors++;
	} else {
		netif_receive_skb(skb);
	}
}

static int m_can_read_fifo(struct net_device *dev, u32 fgi)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	struct id_and_dlc fifo_header;
	u32 timestamp = 0;
	int err;

	err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
	if (err)
		goto out_fail;

	if (fifo_header.dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	if (fifo_header.dlc & RX_BUF_FDF)
		cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
	else
		cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);

	if (fifo_header.id & RX_BUF_XTD)
		cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;

	if (fifo_header.id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (fifo_header.dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
				      cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_free_skb;
		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;

	m_can_receive_skb(cdev, skb, timestamp);

	return 0;

out_free_skb:
	kfree_skb(skb);
out_fail:
	netdev_err(dev, "FIFO read returned %d\n", err);
	return err;
}

static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;
	u32 rx_count;
	u32 fgi;
	int ack_fgi = -1;
	int i;
	int err = 0;

	rxfs = m_can_read(cdev, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs);
	fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);

	for (i = 0; i < rx_count && quota > 0; ++i) {
		err = m_can_read_fifo(dev, fgi);
		if (err)
			break;

		quota--;
		pkts++;
		ack_fgi = fgi;
		fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi);
	}

	if (ack_fgi != -1)
		m_can_write(cdev, M_CAN_RXF0A, ack_fgi);

	if (err)
		return err;

	return pkts;
}

static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;
	u32 timestamp = 0;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	cdev->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(cdev, M_CAN_ECR);
	bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
	bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);

	return 0;
}

static int m_can_clk_start(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support == 0)
		return 0;

	return pm_runtime_resume_and_get(cdev->dev);
}

static void m_can_clk_stop(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support)
		pm_runtime_put_sync(cdev->dev);
}

static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = m_can_clk_start(cdev);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(cdev);

	return 0;
}

static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;
	u32 timestamp = 0;

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cdev->can.can_stats.error_warning++;
		cdev->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cdev->can.can_stats.error_passive++;
		cdev->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cdev->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(cdev);
		cdev->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
		ecr = m_can_read(cdev, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}

static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}

static inline bool is_lec_err(u8 lec)
{
	return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
}

static inline bool m_can_is_protocol_err(u32 irqstatus)
{
	return irqstatus & IR_ERR_LEC_31X;
}

static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	/* update tx error stats since there is protocol error */
	stats->tx_errors++;

	/* update arbitration lost status */
	if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
		netdev_dbg(dev, "Protocol error in Arbitration fail\n");
		cdev->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	if (unlikely(!skb)) {
		netdev_dbg(dev, "allocation of skb failed\n");
		return 0;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
		u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
		u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);

		if (is_lec_err(lec)) {
			netdev_dbg(dev, "Arbitration phase error detected\n");
			work_done += m_can_handle_lec_err(dev, lec);
		}

		if (is_lec_err(dlec)) {
			netdev_dbg(dev, "Data phase error detected\n");
			work_done += m_can_handle_lec_err(dev, dlec);
		}
	}

	/* handle protocol errors in arbitration phase */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    m_can_is_protocol_err(irqstatus))
		work_done += m_can_handle_protocol_error(dev, irqstatus);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}

static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int rx_work_or_err;
	int work_done = 0;

	if (!irqstatus)
		goto end;

	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = '1' and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
	if (cdev->version <= 31 && irqstatus & IR_MRAF &&
	    m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
		struct can_berr_counter bec;

		__m_can_get_berr_counter(dev, &bec);
		if (bec.rxerr == 127) {
			m_can_write(cdev, M_CAN_IR, IR_MRAF);
			irqstatus &= ~IR_MRAF;
		}
	}

	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev,
						       m_can_read(cdev, M_CAN_PSR));

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus,
						     m_can_read(cdev, M_CAN_PSR));

	if (irqstatus & IR_RF0N) {
		rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
		if (rx_work_or_err < 0)
			return rx_work_or_err;

		work_done += rx_work_or_err;
	}

end:
	return work_done;
}

static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done < 0)
		m_can_disable_all_interrupts(cdev);

	return work_done;
}

static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;
	u32 irqstatus;

	irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);

	work_done = m_can_rx_handler(dev, quota, irqstatus);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0 && work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(cdev);
	}

	return work_done;
}

/* Echo tx skb and update net stats. Peripherals use rx-offload for
 * echo. timestamp is used for peripherals to ensure correct ordering
 * by rx-offload, and is ignored for non-peripherals.
 */
static void m_can_tx_update_stats(struct m_can_classdev *cdev,
				  unsigned int msg_mark,
				  u32 timestamp)
{
	struct net_device *dev = cdev->net;
	struct net_device_stats *stats = &dev->stats;

	if (cdev->is_peripheral)
		stats->tx_bytes +=
			can_rx_offload_get_echo_skb(&cdev->offload,
						    msg_mark,
						    timestamp,
						    NULL);
	else
		stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);

	stats->tx_packets++;
}

static int m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int ack_fgi = -1;
	int i = 0;
	int err = 0;
	unsigned int msg_mark;

	struct m_can_classdev *cdev = netdev_priv(dev);

	/* read tx event fifo status */
	m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
	fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs);

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		u32 txe, timestamp = 0;

		/* get message marker, timestamp */
		err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
		if (err) {
			netdev_err(dev, "TXE FIFO read returned %d\n", err);
			break;
		}

		msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
		timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;

		ack_fgi = fgi;
		fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);

		/* update stats */
		m_can_tx_update_stats(cdev, msg_mark, timestamp);
	}

	if (ack_fgi != -1)
		m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
							  ack_fgi));

	return err;
}
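
/* Note on the ack_fgi pattern above (and in m_can_do_rx_poll()): only the
 * index of the last element actually read is written back to the acknowledge
 * register, which advances the hardware get index past every element up to
 * and including it, so a batch that fails mid-loop is still acknowledged
 * correctly.
 */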

static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 ir;

	if (pm_runtime_suspended(cdev->dev))
		return IRQ_NONE;

	ir = m_can_read(cdev, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	m_can_write(cdev, M_CAN_IR, ir);

	if (cdev->ops->clear_interrupts)
		cdev->ops->clear_interrupts(cdev);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		cdev->irqstatus = ir;
		if (!cdev->is_peripheral) {
			m_can_disable_all_interrupts(cdev);
			napi_schedule(&cdev->napi);
		} else if (m_can_rx_peripheral(dev, ir) < 0) {
			goto out_fail;
		}
	}

	if (cdev->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt */
			u32 timestamp = 0;

			if (cdev->is_peripheral)
				timestamp = m_can_get_timestamp(cdev);
			m_can_tx_update_stats(cdev, 0, timestamp);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			/* New TX FIFO Element arrived */
			if (m_can_echo_tx_event(dev) != 0)
				goto out_fail;

			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(cdev))
				netif_wake_queue(dev);
		}
	}

	if (cdev->is_peripheral)
		can_rx_offload_threaded_irq_finish(&cdev->offload);

	return IRQ_HANDLED;

out_fail:
	m_can_disable_all_interrupts(cdev);
	return IRQ_HANDLED;
}

static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	const struct can_bittiming *bt = &cdev->can.bittiming;
	const struct can_bittiming *dbt = &cdev->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
		  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
		  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
		  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
	m_can_write(cdev, M_CAN_NBTP, reg_btp);

	if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
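			/* Worked example (illustrative numbers, not from the
			 * manual): clock.freq = 40 MHz, a 75.0 % sample point
			 * (ssp = 750, in tenths of a percent) and a 4 Mbit/s
			 * data bitrate give
			 * tdco = (40000000 / 1000) * 750 / 4000000 = 7, i.e.
			 * the secondary sample point lies 7 clock periods
			 * (minimum time quanta) into the bit.
			 */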
			tdco = (cdev->can.clock.freq / 1000) *
				ssp / dbt->bitrate;

			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(cdev, M_CAN_TDCR,
				    FIELD_PREP(TDCR_TDCO_MASK, tdco));
		}

		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
			   FIELD_PREP(DBTP_DSJW_MASK, sjw) |
			   FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
			   FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);

		m_can_write(cdev, M_CAN_DBTP, reg_btp);
	}

	return 0;
}

/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *   - >= v3.1.x: TX FIFO is used
 * - configure mode
 * - setup bittiming
 * - configure timestamp generation
 */
static int m_can_chip_config(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 interrupts = IR_ALL_INT;
	u32 cccr, test;
	int err;

	err = m_can_init_ram(cdev);
	if (err) {
		dev_err(cdev->dev, "Message RAM configuration failed\n");
		return err;
	}

	/* Disable unused interrupts */
	interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
			IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
			IR_RF0F | IR_RF0W);

	m_can_config_endisable(cdev, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(cdev, M_CAN_RXESC,
		    FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(cdev, M_CAN_GFC, 0x0);

	if (cdev->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
			    cdev->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(cdev, M_CAN_TXBC,
			    FIELD_PREP(TXBC_TFQS_MASK,
				       cdev->mcfg[MRAM_TXB].num) |
			    cdev->mcfg[MRAM_TXB].off);
	}

	/* support 64 bytes payload */
	m_can_write(cdev, M_CAN_TXESC,
		    FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));

	/* TX Event FIFO */
	if (cdev->version == 30) {
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK, 1) |
			    cdev->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK,
				       cdev->mcfg[MRAM_TXE].num) |
			    cdev->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(cdev, M_CAN_RXF0C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
		    cdev->mcfg[MRAM_RXF0].off);

	m_can_write(cdev, M_CAN_RXF1C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
		    cdev->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(cdev, M_CAN_CCCR);
	test = m_can_read(cdev, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (cdev->version == 30) {
		/* Version 3.0.x */

		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO | CCCR_DAR);

		/* Only 3.2.x has NISO Bit implemented */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	/* Enable Monitoring (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Disable Auto Retransmission (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		cccr |= CCCR_DAR;

	/* Write config */
	m_can_write(cdev, M_CAN_CCCR, cccr);
	m_can_write(cdev, M_CAN_TEST, test);

	/* Enable interrupts */
	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
		if (cdev->version == 30)
			interrupts &= ~(IR_ERR_LEC_30X);
		else
			interrupts &= ~(IR_ERR_LEC_31X);
	}
	m_can_write(cdev, M_CAN_IE, interrupts);

	/* route all interrupts to INT0 */
	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	/* enable internal timestamp generation, with a prescaler of 16. The
	 * prescaler is applied to the nominal bit timing
	 */
	m_can_write(cdev, M_CAN_TSCC,
		    FIELD_PREP(TSCC_TCP_MASK, 0xf) |
		    FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));

	m_can_config_endisable(cdev, false);

	if (cdev->ops->init)
		cdev->ops->init(cdev);

	return 0;
}

static int m_can_start(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int ret;

	/* basic m_can configuration */
	ret = m_can_chip_config(dev);
	if (ret)
		return ret;

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(cdev);

	if (!dev->irq) {
		dev_dbg(cdev->dev, "Start hrtimer\n");
		hrtimer_start(&cdev->hrtimer,
			      ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
			      HRTIMER_MODE_REL_PINNED);
	}

	return 0;
}

static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_clean(dev);
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Checks core release number of M_CAN
 * returns 0 if an unsupported device is detected
 * else it returns the release and step coded as:
 * return value = 10 * <release> + 1 * <step>
 */
static int m_can_check_core_release(struct m_can_classdev *cdev)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;

	/* Read Core Release Version and split into version number
	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
	 */
	crel_reg = m_can_read(cdev, M_CAN_CREL);
	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);

	if (rel == 3) {
		/* M_CAN v3.x.y: create return value */
		res = 30 + step;
	} else {
		/* Unsupported M_CAN version */
		res = 0;
	}

	return res;
}

/* Selectable Non ISO support only in version 3.2.x
 * This function checks if the bit is writable.
 */
static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
	u32 cccr_reg, cccr_poll = 0;
	int niso_timeout = -ETIMEDOUT;
	int i;

	m_can_config_endisable(cdev, true);
	cccr_reg = m_can_read(cdev, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	for (i = 0; i <= 10; i++) {
		cccr_poll = m_can_read(cdev, M_CAN_CCCR);
		if (cccr_poll == cccr_reg) {
			niso_timeout = 0;
			break;
		}

		usleep_range(1, 5);
	}

	/* Clear NISO */
	cccr_reg &= ~(CCCR_NISO);
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(cdev, false);

	/* return false if time out (-ETIMEDOUT), else return true */
	return !niso_timeout;
}

static int m_can_dev_setup(struct m_can_classdev *cdev)
{
	struct net_device *dev = cdev->net;
	int m_can_version, err;

	m_can_version = m_can_check_core_release(cdev);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(cdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	if (!cdev->is_peripheral)
		netif_napi_add(dev, &cdev->napi, m_can_poll);

	/* Shared properties of all M_CAN versions */
	cdev->version = m_can_version;
	cdev->can.do_set_mode = m_can_set_mode;
	cdev->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY |
		CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD |
		CAN_CTRLMODE_ONE_SHOT;

	/* Set properties depending on M_CAN version */
	switch (cdev->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = &m_can_bittiming_const_30X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
		break;
	case 32:
	case 33:
		/* Support both MCAN version v3.2.x and v3.3.0 */
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;

		cdev->can.ctrlmode_supported |=
			(m_can_niso_supported(cdev) ?
			 CAN_CTRLMODE_FD_NON_ISO : 0);
		break;
	default:
		dev_err(cdev->dev, "Unsupported version number: %2d",
			cdev->version);
		return -EINVAL;
	}

	if (cdev->ops->init)
		cdev->ops->init(cdev);

	return 0;
}

static void m_can_stop(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	if (!dev->irq) {
		dev_dbg(cdev->dev, "Stop hrtimer\n");
		hrtimer_cancel(&cdev->hrtimer);
	}

	/* disable all interrupts */
	m_can_disable_all_interrupts(cdev);

	/* Set init mode to disengage from the network */
	m_can_config_endisable(cdev, true);

	/* set the state as STOPPED */
	cdev->can.state = CAN_STATE_STOPPED;
}
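
/* .ndo_stop callback: the inverse of m_can_open(). Stops the hardware and
 * releases the IRQ, clocks, workqueue and transceiver acquired at open time.
 */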
static int m_can_close(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	netif_stop_queue(dev);

	if (!cdev->is_peripheral)
		napi_disable(&cdev->napi);

	m_can_stop(dev);
	m_can_clk_stop(cdev);
	free_irq(dev->irq, dev);

	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		destroy_workqueue(cdev->tx_wq);
		cdev->tx_wq = NULL;
		can_rx_offload_disable(&cdev->offload);
	}

	close_candev(dev);

	phy_power_off(cdev->transceiver);

	return 0;
}

static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = cdev->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!cdev->can.echo_skb[next_idx];
}
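
/* Write one frame from cdev->tx_skb to the controller. M_CAN v3.0.x uses a
 * single dedicated TX buffer (only one frame in flight), while >= v3.1.x
 * queues into the TX FIFO and uses the put index as message marker so the
 * TX interrupt can echo the right skb.
 */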
static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
	struct net_device *dev = cdev->net;
	struct sk_buff *skb = cdev->tx_skb;
	struct id_and_dlc fifo_header;
	u32 cccr, fdflags;
	u32 txfqs;
	int err;
	int putidx;

	cdev->tx_skb = NULL;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		fifo_header.id = cf->can_id & CAN_EFF_MASK;
		fifo_header.id |= TX_BUF_XTD;
	} else {
		fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		fifo_header.id |= TX_BUF_RTR;

	if (cdev->version == 30) {
		netif_stop_queue(dev);

		fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;

		/* Write the frame ID, DLC, and payload to the FIFO element */
		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(cdev, M_CAN_CCCR);
			cccr &= ~CCCR_CMR_MASK;
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD_BRS);
				else
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD);
			} else {
				cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
			}
			m_can_write(cdev, M_CAN_CCCR, cccr);
		}
		m_can_write(cdev, M_CAN_TXBTIE, 0x1);

		can_put_echo_skb(skb, dev, 0, 0);

		m_can_write(cdev, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		txfqs = m_can_read(cdev, M_CAN_TXFQS);

		/* Check if FIFO full */
		if (_m_can_tx_fifo_full(txfqs)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");

			if (cdev->is_peripheral) {
				kfree_skb(skb);
				dev->stats.tx_dropped++;
				return NETDEV_TX_OK;
			} else {
				return NETDEV_TX_BUSY;
			}
		}

		/* get put index for frame */
		putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);

		/* Construct DLC Field, with CAN-FD configuration.
		 * Use the put index of the fifo as the message marker,
		 * used in the TX interrupt for sending the correct echo frame.
		 */

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
			FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
			fdflags | TX_BUF_EFC;
		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx, 0);

		/* Enable TX FIFO element to start transfer */
		m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(cdev) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;

out_fail:
	netdev_err(dev, "FIFO write returned %d\n", err);
	m_can_disable_all_interrupts(cdev);
	return NETDEV_TX_BUSY;
}
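
/* Peripheral devices access the registers through a medium that may sleep
 * (e.g. SPI), so frames cannot be written from hard_xmit context and are
 * handed off to this work item instead.
 */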
static void m_can_tx_work_queue(struct work_struct *ws)
{
	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
						   tx_work);

	m_can_tx_handler(cdev);
}

static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	if (cdev->is_peripheral) {
		if (cdev->tx_skb) {
			netdev_err(dev, "hard_xmit called while tx busy\n");
			return NETDEV_TX_BUSY;
		}

		if (cdev->can.state == CAN_STATE_BUS_OFF) {
			m_can_clean(dev);
		} else {
			/* Need to stop the queue to avoid numerous requests
			 * from being sent. Suggested improvement is to create
			 * a queueing mechanism that will queue the skbs and
			 * process them in order.
			 */
			cdev->tx_skb = skb;
			netif_stop_queue(cdev->net);
			queue_work(cdev->tx_wq, &cdev->tx_work);
		}
	} else {
		cdev->tx_skb = skb;
		return m_can_tx_handler(cdev);
	}

	return NETDEV_TX_OK;
}
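
/* Polling fallback for devices without a wired-up interrupt line: run the
 * ISR from an hrtimer every HRTIMER_POLL_INTERVAL_MS.
 */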
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
{
	struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev,
						   hrtimer);

	m_can_isr(0, cdev->net);

	hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));

	return HRTIMER_RESTART;
}
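
/* .ndo_open callback: power on the transceiver and clocks, request the IRQ
 * (or rely on the polling hrtimer armed by m_can_start()), then start the
 * controller.
 */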
static int m_can_open(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = phy_power_on(cdev->transceiver);
	if (err)
		return err;

	err = m_can_clk_start(cdev);
	if (err)
		goto out_phy_power_off;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	if (cdev->is_peripheral)
		can_rx_offload_enable(&cdev->offload);

	/* register interrupt handler */
	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		cdev->tx_wq = alloc_workqueue("mcan_wq",
					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}

		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);

		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
					   dev->name, dev);
	} else if (dev->irq) {
		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
				  dev);
	}

	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	err = m_can_start(dev);
	if (err)
		goto exit_irq_fail;

	if (!cdev->is_peripheral)
		napi_enable(&cdev->napi);

	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	if (cdev->is_peripheral)
		destroy_workqueue(cdev->tx_wq);
out_wq_fail:
	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(cdev);
out_phy_power_off:
	phy_power_off(cdev->transceiver);
	return err;
}
2014-07-16 17:30:50 +08:00
static const struct net_device_ops m_can_netdev_ops = {
. ndo_open = m_can_open ,
. ndo_stop = m_can_close ,
. ndo_start_xmit = m_can_start_xmit ,
2014-10-29 18:45:23 +08:00
. ndo_change_mtu = can_change_mtu ,
2014-07-16 17:30:50 +08:00
} ;
2022-07-27 19:16:32 +09:00
static const struct ethtool_ops m_can_ethtool_ops = {
. get_ts_info = ethtool_op_get_ts_info ,
} ;
2014-07-16 17:30:50 +08:00
static int register_m_can_dev ( struct net_device * dev )
{
dev - > flags | = IFF_ECHO ; /* we support local echo */
dev - > netdev_ops = & m_can_netdev_ops ;
2022-07-27 19:16:32 +09:00
dev - > ethtool_ops = & m_can_ethtool_ops ;
2014-07-16 17:30:50 +08:00
return register_candev ( dev ) ;
}
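
/* Carve the Message RAM into consecutive regions (SIDF, XIDF, RXF0, RXF1,
 * RXB, TXE, TXB) from the eight-cell "bosch,mram-cfg" property: cell 0 is
 * the base offset, the remaining cells are element counts. An illustrative
 * (not normative) device tree snippet:
 *
 *	bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
 *
 * i.e. offset 0x0, no filter elements, 32 RX FIFO 0 elements and a single
 * TX buffer element.
 */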
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
				const u32 *mram_config_vals)
{
	cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
		cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
		cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
		cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
		cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
	cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
		cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
	cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
		cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
		FIELD_MAX(TXBC_NDTB_MASK);

	dev_dbg(cdev->dev,
		"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
		cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
		cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
		cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
		cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
		cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
		cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}

int m_can_init_ram(struct m_can_classdev *cdev)
{
	int end, i, start;
	int err = 0;

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = cdev->mcfg[MRAM_SIDF].off;
	end = cdev->mcfg[MRAM_TXB].off +
		cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;

	for (i = start; i < end; i += 4) {
		err = m_can_fifo_write_no_off(cdev, i, 0x0);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(m_can_init_ram);

int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
	int ret = 0;

	cdev->hclk = devm_clk_get(cdev->dev, "hclk");
	cdev->cclk = devm_clk_get(cdev->dev, "cclk");

	if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
		dev_err(cdev->dev, "no clock found\n");
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
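
/* Allocation helper for glue drivers. A minimal probe() sketch (the private
 * struct and ops names are illustrative, not part of this API):
 *
 *	mcan = m_can_class_allocate_dev(&pdev->dev, sizeof(struct my_priv));
 *	if (!mcan)
 *		return -ENOMEM;
 *	mcan->ops = &my_ops;
 *	ret = m_can_class_register(mcan);
 */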
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
						int sizeof_priv)
{
	struct m_can_classdev *class_dev = NULL;
	u32 mram_config_vals[MRAM_CFG_LEN];
	struct net_device *net_dev;
	u32 tx_fifo_size;
	int ret;

	ret = fwnode_property_read_u32_array(dev_fwnode(dev),
					     "bosch,mram-cfg",
					     mram_config_vals,
					     sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(dev, "Could not get Message RAM configuration.");
		goto out;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
	if (!net_dev) {
		dev_err(dev, "Failed to allocate CAN device");
		goto out;
	}

	class_dev = netdev_priv(net_dev);
	class_dev->net = net_dev;
	class_dev->dev = dev;
	SET_NETDEV_DEV(net_dev, dev);

	m_can_of_parse_mram(class_dev, mram_config_vals);
out:
	return class_dev;
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);

void m_can_class_free_dev(struct net_device *net)
{
	free_candev(net);
}
EXPORT_SYMBOL_GPL(m_can_class_free_dev);
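
/* Final registration step for the platform, PCI and peripheral glue
 * drivers: set up the device according to its core version and register it
 * with the networking core.
 */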
int m_can_class_register(struct m_can_classdev *cdev)
{
	int ret;

	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;
	}

	if (cdev->is_peripheral) {
		ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
						NAPI_POLL_WEIGHT);
		if (ret)
			goto clk_disable;
	}

	if (!cdev->net->irq)
		cdev->hrtimer.function = &hrtimer_callback;

	ret = m_can_dev_setup(cdev);
	if (ret)
		goto rx_offload_del;

	ret = register_m_can_dev(cdev->net);
	if (ret) {
		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
			cdev->net->name, ret);
		goto rx_offload_del;
	}

	of_can_transceiver(cdev->net);

	dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, cdev->net->irq, cdev->version);

	/* Probe finished
	 * Stop clocks. They will be reactivated once the M_CAN device is opened
	 */
	m_can_clk_stop(cdev);

	return 0;

rx_offload_del:
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
clk_disable:
	m_can_clk_stop(cdev);

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_register);

void m_can_class_unregister(struct m_can_classdev *cdev)
{
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
	unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
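
/* System sleep helpers, typically wired into the glue driver's dev_pm_ops. */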
int m_can_class_suspend(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(cdev);
	}

	pinctrl_pm_select_sleep_state(dev);

	cdev->can.state = CAN_STATE_SLEEPING;

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);

int m_can_class_resume(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	pinctrl_pm_select_default_state(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;

		ret = m_can_start(ndev);
		if (ret) {
			m_can_clk_stop(cdev);
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");