// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
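
/*
 * Register map of the MediaTek SPI controller, as offsets from the
 * controller base. Registers carrying an _IPM suffix only exist on
 * the newer IPM design.
 */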
#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24

#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)

#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

#define PIN_MODE_CFG(x)			((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)
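
/*
 * Per-SoC capability flags; each supported SoC picks one of the
 * mtk_spi_compatible instances below via the of_match table.
 */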
struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
	/* some IC designs adjust the cfg register to enhance time accuracy */
	bool enhance_timing;
	/* some ICs support DMA address extension */
	bool dma_ext;
	/* some ICs do not need to unprepare the SPI clk */
	bool no_need_unprepare;
	/* IPM design adjusts and extends registers to support more features */
	bool ipm_design;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;				/* MTK_SPI_IDLE or MTK_SPI_PAUSED */
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;				/* length of the chunk currently in flight */
	u32 num_xfered;				/* bytes already transferred in cur_transfer */
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	u32 spi_clk_hz;				/* cached rate of spi_clk */
	struct completion spimem_done;		/* signalled from the IRQ for spi-mem ops */
	bool use_spimem;			/* a spi-mem op is in flight */
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}
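
/*
 * Apply the CS setup/hold/inactive times requested via spi_device.
 * The spi_delay values arrive in ns and are converted to ticks of
 * the SPI kernel clock: ns * ceil(spi_clk_hz / 1MHz) / 1000.
 */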
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					    << SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}
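
/*
 * Program the static per-device configuration: clock polarity and
 * phase, bit order, endianness, CS polarity, pad select and tick
 * delay. Called from prepare_message and again before each spi-mem
 * operation.
 */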
static int mtk_spi_hw_init(struct spi_master *master,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the tx/rx bit order (MSB first unless SPI_LSB_FIRST) */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* keep the finish and pause interrupts always enabled */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);

	return 0;
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(master, msg->spi);
}
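
/*
 * Chip select is driven through the controller's pause mode: keeping
 * PAUSE_EN set holds CS asserted between transfers, while clearing it
 * (plus a controller reset) deasserts CS.
 */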
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}
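
/*
 * Program the SCK high/low times for the requested transfer speed.
 * The divider is spi_clk_hz / speed_hz (rounded up), split evenly
 * between the high and low phases of the clock.
 */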
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}
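
/*
 * The hardware transfers xfer_len bytes as packet_loop packets of
 * packet_size bytes each, so xfer_len must either fit in one packet
 * or be an exact multiple of the packet size (1 KiB, or 64 KiB on
 * the IPM design); mtk_spi_update_mdata_len() enforces this for DMA.
 */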
static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}
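
/*
 * DMA chunks must satisfy the packet-size constraint above.
 * mtk_spi_get_mult_delta() returns the trailing remainder that does
 * not fit, and mtk_spi_update_mdata_len() splits the scatterlist
 * lengths into conforming chunks accordingly.
 */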
static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}
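
/*
 * PIO path: fill at most one FIFO's worth (32 bytes) and kick off the
 * transfer; longer transfers are topped up from the interrupt handler.
 */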
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer->speed_hz);
	mtk_spi_setup_packet(master);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(master, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi->cs_gpiod, 0);

	return 0;
}
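
/*
 * Interrupt handler shared by all transfer modes: it completes spi-mem
 * operations, drains/refills the FIFO for PIO transfers, and walks the
 * scatterlists for DMA transfers, starting the next chunk until the
 * whole spi_transfer is done.
 */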
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	if (!master->can_dma(master, NULL, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
					mdata->num_xfered +
					(cnt * 4),
					&reg_val,
					remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);

		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      trans->tx_buf + mdata->num_xfered, cnt);

		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
			       trans->tx_buf + (cnt * 4) + mdata->num_xfered,
			       remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}

static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	/*
	 * Each byte takes 8 cycles of the SPI clock. Speed is given in
	 * Hz and we want milliseconds, hence the factor of 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* prevent a zero timeout for short transfers */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* 1s tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
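
/*
 * Execute a spi-mem operation: opcode, address and dummy bytes (plus
 * any outgoing data) are serialized into a DMA-safe bounce buffer,
 * SPI_CFG3_IPM_REG is programmed with the op layout and bus width,
 * and the whole op runs as one DMA transfer completed from the IRQ.
 */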
static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->master, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->master);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);

	mtk_spi_enable_transfer(mem->spi->master);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	master = devm_spi_alloc_master(dev, sizeof(*mdata));
	if (!master) {
		dev_err(dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = dev->of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;
	master->set_cs_timing = mtk_spi_set_hw_cs_timing;
	master->use_gpio_descriptors = true;

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		master->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		master->mode_bits |= SPI_LOOP;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		master->mem_ops = &mtk_spi_mem_ops;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0) {
			dev_err(dev,
				"No 'mediatek,pad-select' property\n");
			return -EINVAL;
		}

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
				dev_err(dev, "wrong pad-sel[%d]: %u\n",
					i, mdata->pad_sel[i]);
				return -EINVAL;
			}
		}
	}

	platform_set_drvdata(pdev, master);

	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(dev), master);
	if (ret) {
		dev_err(dev, "failed to register irq (%d)\n", ret);
		return ret;
	}
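
	/*
	 * Clock chain: parent-clk feeds the sel-clk mux, which supplies
	 * spi-clk, the kernel clock that the transfer timings are
	 * derived from. hclk (the bus clock) is optional.
	 */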
	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(dev, "failed to get parent-clk: %d\n", ret);
		return ret;
	}

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk)) {
		ret = PTR_ERR(mdata->sel_clk);
		dev_err(dev, "failed to get sel-clk: %d\n", ret);
		return ret;
	}

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(dev, "failed to get spi-clk: %d\n", ret);
		return ret;
	}

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk)) {
		ret = PTR_ERR(mdata->spi_hclk);
		dev_err(dev, "failed to get hclk: %d\n", ret);
		return ret;
	}

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(dev, "failed to clk_set_parent (%d)\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0) {
		dev_err(dev, "failed to enable hclk (%d)\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		clk_disable_unprepare(mdata->spi_hclk);
		return ret;
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	pm_runtime_enable(dev);

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect) {
			dev_err(dev,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (!master->cs_gpiods && master->num_chipselect > 1) {
			dev_err(dev,
				"cs_gpios not specified and num_chipselect > 1\n");
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	ret = devm_spi_register_master(dev, master);
	if (ret) {
		dev_err(dev, "failed to register master (%d)\n", ret);
		goto err_disable_runtime_pm;
	}

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(dev);
	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_unprepare(mdata->spi_clk);
		clk_unprepare(mdata->spi_hclk);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
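
/*
 * Runtime PM: on SoCs with no_need_unprepare set, the clocks stay
 * prepared across runtime suspend and are only gated/ungated;
 * otherwise they are fully unprepared as well.
 */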
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm	= &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");