// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI QSPI driver
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
 * Author: Sourav Poddar <sourav.poddar@ti.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

struct ti_qspi_regs {
	u32 clkctrl;
};

struct ti_qspi {
	struct completion	transfer_complete;

	/* list synchronization */
	struct mutex		list_lock;

	struct spi_master	*master;
	void __iomem		*base;
	void __iomem		*mmap_base;
	size_t			mmap_size;
	struct regmap		*ctrl_base;
	unsigned int		ctrl_reg;
	struct clk		*fclk;
	struct device		*dev;

	struct ti_qspi_regs	ctx_reg;

	dma_addr_t		mmap_phys_base;
	dma_addr_t		rx_bb_dma_addr;
	void			*rx_bb_addr;
	struct dma_chan		*rx_chan;

	u32 cmd;
	u32 dc;

	bool mmap_enabled;
	int current_cs;
};

#define QSPI_PID			(0x0)
#define QSPI_SYSCONFIG			(0x10)
#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
#define QSPI_SPI_DC_REG			(0x44)
#define QSPI_SPI_CMD_REG		(0x48)
#define QSPI_SPI_STATUS_REG		(0x4c)
#define QSPI_SPI_DATA_REG		(0x50)
#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
#define QSPI_SPI_SWITCH_REG		(0x64)
#define QSPI_SPI_DATA_REG_1		(0x68)
#define QSPI_SPI_DATA_REG_2		(0x6c)
#define QSPI_SPI_DATA_REG_3		(0x70)

#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)

/* Clock Control */
#define QSPI_CLK_EN			(1 << 31)
#define QSPI_CLK_DIV_MAX		0xffff

/* Command */
#define QSPI_EN_CS(n)			(n << 28)
#define QSPI_WLEN(n)			((n - 1) << 19)
#define QSPI_3_PIN			(1 << 18)
#define QSPI_RD_SNGL			(1 << 16)
#define QSPI_WR_SNGL			(2 << 16)
#define QSPI_RD_DUAL			(3 << 16)
#define QSPI_RD_QUAD			(7 << 16)
#define QSPI_INVAL			(4 << 16)
#define QSPI_FLEN(n)			((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS		128
#define QSPI_WLEN_MAX_BYTES		16
#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)

/* STATUS REGISTER */
#define BUSY				0x01
#define WC				0x02

/* Device Control */
#define QSPI_DD(m, n)			(m << (3 + n * 8))
#define QSPI_CKPHA(n)			(1 << (2 + n * 8))
#define QSPI_CSPOL(n)			(1 << (1 + n * 8))
#define QSPI_CKPOL(n)			(1 << (n * 8))

#define QSPI_FRAME			4096

#define QSPI_AUTOSUSPEND_TIMEOUT	2000

#define MEM_CS_EN(n)			((n + 1) << 8)
#define MEM_CS_MASK			(7 << 8)

#define MM_SWITCH			0x1

#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
#define QSPI_SETUP_ADDR_SHIFT		8
#define QSPI_SETUP_DUMMY_SHIFT		10

#define QSPI_DMA_BUFFER_SIZE		SZ_64K

static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
		unsigned long reg)
{
	return readl(qspi->base + reg);
}

static inline void ti_qspi_write(struct ti_qspi *qspi,
		unsigned long val, unsigned long reg)
{
	writel(val, qspi->base + reg);
}

static int ti_qspi_setup(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
	int ret;

	if (spi->master->busy) {
		dev_dbg(qspi->dev, "master busy doing other transfers\n");
		return -EBUSY;
	}

	if (!qspi->master->max_speed_hz) {
		dev_err(qspi->dev, "spi max frequency not defined\n");
		return -EINVAL;
	}

	spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_resume_and_get() failed\n");
		return ret;
	}

	pm_runtime_mark_last_busy(qspi->dev);
	ret = pm_runtime_put_autosuspend(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
		return ret;
	}

	return 0;
}
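
/*
 * Program the serial clock for the requested transfer speed. The
 * divider is chosen so that fclk / (clk_div + 1) does not exceed
 * speed_hz (hence DIV_ROUND_UP() - 1, clamped to the 16-bit divider
 * field). SCLK is gated off before the new divider is written, then
 * re-enabled together with it.
 */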
static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
	int clk_div;
	u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;

	clk_rate = clk_get_rate(qspi->fclk);
	clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
	clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);

	pm_runtime_resume_and_get(qspi->dev);

	clk_ctrl_new = QSPI_CLK_EN | clk_div;
	if (ctx_reg->clkctrl != clk_ctrl_new) {
		clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);

		clk_ctrl_reg &= ~QSPI_CLK_EN;

		/* disable SCLK */
		ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);

		/* enable SCLK */
		ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
		ctx_reg->clkctrl = clk_ctrl_new;
	}

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);
}

static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;

	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}
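
/*
 * Busy-wait until the controller clears the BUSY status bit or the
 * completion timeout elapses; returns the BUSY bit, i.e. non-zero if
 * the controller is still busy when we give up.
 */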
static inline u32 qspi_is_busy(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	while ((stat & BUSY) && time_after(timeout, jiffies)) {
		cpu_relax();
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	}

	WARN(stat & BUSY, "qspi busy\n");
	return stat & BUSY;
}
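
/*
 * Poll for the word-complete (WC) status bit. The extra read after
 * the timed loop avoids a spurious -ETIMEDOUT when completion races
 * with the final time check.
 */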
static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	do {
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
		if (stat & WC)
			return 0;
		cpu_relax();
	} while (time_after(timeout, jiffies));

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	if (stat & WC)
		return 0;
	return -ETIMEDOUT;
}
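
/*
 * PIO write path. For 8-bit words the transfer is batched: while at
 * least 16 bytes remain, four big-endian 32-bit words are loaded into
 * QSPI_SPI_DATA_REG_3..QSPI_SPI_DATA_REG and shifted out as a single
 * 128-bit word; the tail is written one byte at a time.
 */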
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			  int count)
{
	int wlen, xfer_len;
	unsigned int cmd;
	const u8 *txbuf;
	u32 data;

	txbuf = t->tx_buf;
	cmd = qspi->cmd | QSPI_WR_SNGL;
	wlen = t->bits_per_word >> 3;	/* in bytes */
	xfer_len = wlen;

	while (count) {
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
					cmd, qspi->dc, *txbuf);
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *txp = (u32 *)txbuf;

				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
						QSPI_SPI_DATA_REG_3);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
						QSPI_SPI_DATA_REG_2);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
						QSPI_SPI_DATA_REG_1);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
						QSPI_SPI_DATA_REG);
				xfer_len = QSPI_WLEN_MAX_BYTES;
				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
			} else {
				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
				cmd = qspi->cmd | QSPI_WR_SNGL;
				xfer_len = wlen;
				cmd |= QSPI_WLEN(wlen);
			}
			break;
		case 2:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
					cmd, qspi->dc, *txbuf);
			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
					cmd, qspi->dc, *txbuf);
			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "write timed out\n");
			return -ETIMEDOUT;
		}
		txbuf += xfer_len;
		count -= xfer_len;
	}

	return 0;
}
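
/*
 * PIO read path. The read command (single/dual/quad) is picked from
 * t->rx_nbits. For 8-bit words up to 16 bytes are fetched per command
 * via QSPI_SPI_DATA_REG_3..QSPI_SPI_DATA_REG; shorter residues are
 * extracted from QSPI_SPI_DATA_REG by shifting according to the
 * programmed word length.
 */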
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			 int count)
{
	int wlen;
	unsigned int cmd;
	u32 rx;
	u8 rxlen, rx_wlen;
	u8 *rxbuf;

	rxbuf = t->rx_buf;
	cmd = qspi->cmd;
	switch (t->rx_nbits) {
	case SPI_NBITS_DUAL:
		cmd |= QSPI_RD_DUAL;
		break;
	case SPI_NBITS_QUAD:
		cmd |= QSPI_RD_QUAD;
		break;
	default:
		cmd |= QSPI_RD_SNGL;
		break;
	}
	wlen = t->bits_per_word >> 3;	/* in bytes */
	rx_wlen = wlen;

	while (count) {
		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			/*
			 * Optimize the 8-bit words transfers, as used by
			 * the SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				rxlen = QSPI_WLEN_MAX_BYTES;
			} else {
				rxlen = min(count, 4);
			}
			rx_wlen = rxlen << 3;
			cmd &= ~QSPI_WLEN_MASK;
			cmd |= QSPI_WLEN(rx_wlen);
			break;
		default:
			rxlen = wlen;
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "read timed out\n");
			return -ETIMEDOUT;
		}

		switch (wlen) {
		case 1:
			/*
			 * Optimize the 8-bit words transfers, as used by
			 * the SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *rxp = (u32 *)rxbuf;

				rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				*rxp++ = be32_to_cpu(rx);
			} else {
				u8 *rxp = rxbuf;

				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				if (rx_wlen >= 8)
					*rxp++ = rx >> (rx_wlen - 8);
				if (rx_wlen >= 16)
					*rxp++ = rx >> (rx_wlen - 16);
				if (rx_wlen >= 24)
					*rxp++ = rx >> (rx_wlen - 24);
				if (rx_wlen >= 32)
					*rxp++ = rx;
			}
			break;
		case 2:
			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
			break;
		}
		rxbuf += rxlen;
		count -= rxlen;
	}

	return 0;
}
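
/*
 * Dispatch one transfer: write out tx_buf if present, then read into
 * rx_buf if present. The controller is half-duplex, so the two
 * directions never run concurrently.
 */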
static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			     int count)
{
	int ret;

	if (t->tx_buf) {
		ret = qspi_write_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while writing\n");
			return ret;
		}
	}

	if (t->rx_buf) {
		ret = qspi_read_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while reading\n");
			return ret;
		}
	}

	return 0;
}

static void ti_qspi_dma_callback(void *param)
{
	struct ti_qspi *qspi = param;

	complete(&qspi->transfer_complete);
}
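
/*
 * Run one dmaengine memcpy from the memory-mapped flash window to
 * dma_dst and wait for its completion. The wait timeout scales with
 * the transfer length (msecs_to_jiffies(len), i.e. ~1 ms per byte).
 */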
static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
			    dma_addr_t dma_src, size_t len)
{
	struct dma_chan *chan = qspi->rx_chan;
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *tx;
	int ret;
	unsigned long time_left;

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
		return -EIO;
	}

	tx->callback = ti_qspi_dma_callback;
	tx->callback_param = qspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&qspi->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	time_left = wait_for_completion_timeout(&qspi->transfer_complete,
						msecs_to_jiffies(len));
	if (time_left == 0) {
		dmaengine_terminate_sync(chan);
		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
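
/*
 * DMA read through the bounce buffer, in chunks of at most
 * QSPI_DMA_BUFFER_SIZE, for destination buffers that cannot be
 * DMA-mapped directly (e.g. vmalloc'ed buffers from jffs2/ubifs).
 */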
static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
				     void *to, size_t readsize)
{
	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
	int ret = 0;

	/*
	 * Use bounce buffer as FS like jffs2, ubifs may pass
	 * buffers that do not belong to kernel lowmem region.
	 */
	while (readsize != 0) {
		size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
					readsize);

		ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
				       dma_src, xfer_len);
		if (ret != 0)
			return ret;
		memcpy(to, qspi->rx_bb_addr, xfer_len);
		readsize -= xfer_len;
		dma_src += xfer_len;
		to += xfer_len;
	}

	return ret;
}
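
/*
 * DMA read straight into a pre-mapped scatterlist: one memcpy DMA per
 * segment, advancing the source offset within the mmap window.
 */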
static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
			       loff_t from)
{
	struct scatterlist *sg;
	dma_addr_t dma_src = qspi->mmap_phys_base + from;
	dma_addr_t dma_dst;
	int i, len, ret;

	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
		dma_dst = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
		if (ret)
			return ret;
		dma_src += len;
	}

	return 0;
}
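
/*
 * Switch the controller into memory-mapped mode and, where a syscon
 * control register is present, route the mmap window to the requested
 * chip select.
 */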
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base) {
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK,
				   MEM_CS_EN(spi->chip_select));
	}
	qspi->mmap_enabled = true;
	qspi->current_cs = spi->chip_select;
}

static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base)
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK, 0);
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;
}
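
/*
 * Program the per-chip-select SETUP register for memory-mapped reads:
 * read opcode, data bus width, address width and dummy byte count.
 */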
static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
				    u8 data_nbits, u8 addr_width,
				    u8 dummy_bytes)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 memval = opcode;

	switch (data_nbits) {
	case SPI_NBITS_QUAD:
		memval |= QSPI_SETUP_RD_QUAD;
		break;
	case SPI_NBITS_DUAL:
		memval |= QSPI_SETUP_RD_DUAL;
		break;
	default:
		memval |= QSPI_SETUP_RD_NORMAL;
		break;
	}
	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
	ti_qspi_write(qspi, memval,
		      QSPI_SPI_SETUP_REG(spi->chip_select));
}
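
/*
 * spi-mem adjust_op_size hook: clamp reads so they either stay inside
 * the memory-mapped window, or, in the software fallback used above
 * that window, fit in one QSPI frame after the opcode, address and
 * dummy bytes are accounted for.
 */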
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	size_t max_len;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (op->addr.val < qspi->mmap_size) {
			/* Limit MMIO to the mmaped region */
			if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
				max_len = qspi->mmap_size - op->addr.val;
				op->data.nbytes = min((size_t)op->data.nbytes,
						      max_len);
			}
		} else {
			/*
			 * Use fallback mode (SW generated transfers) above the
			 * mmaped region.
			 * Adjust size to comply with the QSPI max frame length.
			 */
			max_len = QSPI_FRAME;
			max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
			op->data.nbytes = min((size_t)op->data.nbytes,
					      max_len);
		}
	}

	return 0;
}
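
/*
 * spi-mem exec_op hook: serve flash reads from the memory-mapped
 * window, preferring DMA (scatterlist, else bounce buffer) and
 * falling back to memcpy_fromio() when no Rx channel is available.
 * Anything that is not a mappable read is rejected so the core falls
 * back to regular transfers.
 */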
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
	u32 from = 0;
	int ret = 0;

	/* Only optimize read path. */
	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
	    !op->addr.nbytes || op->addr.nbytes > 4)
		return -ENOTSUPP;

	/* Address exceeds MMIO window size, fall back to regular mode. */
	from = op->addr.val;
	if (from + op->data.nbytes > qspi->mmap_size)
		return -ENOTSUPP;

	mutex_lock(&qspi->list_lock);

	if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) {
		ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
		ti_qspi_enable_memory_map(mem->spi);
	}
	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
				op->addr.nbytes, op->dummy.nbytes);

	if (qspi->rx_chan) {
		struct sg_table sgt;

		if (virt_addr_valid(op->data.buf.in) &&
		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
							&sgt)) {
			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
							     op, &sgt);
		} else {
			ret = ti_qspi_dma_bounce_buffer(qspi, from,
							op->data.buf.in,
							op->data.nbytes);
		}
	} else {
		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
			      op->data.nbytes);
	}

	mutex_unlock(&qspi->list_lock);

	return ret;
}

static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
	.exec_op = ti_qspi_exec_mem_op,
	.adjust_op_size = ti_qspi_adjust_op_size,
};
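
/*
 * transfer_one_message handler for the PIO path: derive the device
 * control and command registers from the SPI mode and chip select,
 * leave mmap mode if it was enabled, then walk the transfer list,
 * capping the message at one QSPI frame (QSPI_FRAME words).
 */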
static int ti_qspi_start_transfer_one(struct spi_master *master,
		struct spi_message *m)
{
	struct ti_qspi *qspi = spi_master_get_devdata(master);
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	int status = 0, ret;
	unsigned int frame_len_words, transfer_len_words;
	int wlen;

	/* setup device control reg */
	qspi->dc = 0;

	if (spi->mode & SPI_CPHA)
		qspi->dc |= QSPI_CKPHA(spi->chip_select);
	if (spi->mode & SPI_CPOL)
		qspi->dc |= QSPI_CKPOL(spi->chip_select);
	if (spi->mode & SPI_CS_HIGH)
		qspi->dc |= QSPI_CSPOL(spi->chip_select);

	frame_len_words = 0;
	list_for_each_entry(t, &m->transfers, transfer_list)
		frame_len_words += t->len / (t->bits_per_word >> 3);
	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);

	/* setup command reg */
	qspi->cmd = 0;
	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
	qspi->cmd |= QSPI_FLEN(frame_len_words);

	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);

	mutex_lock(&qspi->list_lock);

	if (qspi->mmap_enabled)
		ti_qspi_disable_memory_map(spi);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
			     QSPI_WLEN(t->bits_per_word));

		wlen = t->bits_per_word >> 3;
		transfer_len_words = min(t->len / wlen, frame_len_words);

		ti_qspi_setup_clk(qspi, t->speed_hz);
		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
		if (ret) {
			dev_dbg(qspi->dev, "transfer message failed\n");
			mutex_unlock(&qspi->list_lock);
			return -EINVAL;
		}

		m->actual_length += transfer_len_words * wlen;
		frame_len_words -= transfer_len_words;
		if (frame_len_words == 0)
			break;
	}

	mutex_unlock(&qspi->list_lock);

	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int ti_qspi_runtime_resume(struct device *dev)
{
	struct ti_qspi *qspi;

	qspi = dev_get_drvdata(dev);
	ti_qspi_restore_ctx(qspi);

	return 0;
}

static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
{
	if (qspi->rx_bb_addr)
		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
				  qspi->rx_bb_addr,
				  qspi->rx_bb_dma_addr);

	if (qspi->rx_chan)
		dma_release_channel(qspi->rx_chan);
}

static const struct of_device_id ti_qspi_match[] = {
	{ .compatible = "ti,dra7xxx-qspi" },
	{ .compatible = "ti,am4372-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, ti_qspi_match);

static int ti_qspi_probe(struct platform_device *pdev)
{
	struct ti_qspi *qspi;
	struct spi_master *master;
	struct resource *r, *res_mmap;
	struct device_node *np = pdev->dev.of_node;
	u32 max_freq;
	int ret = 0, num_cs, irq;
	dma_cap_mask_t mask;

	master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
	if (!master)
		return -ENOMEM;

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;

	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->setup = ti_qspi_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = ti_qspi_start_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
				     SPI_BPW_MASK(8);
	master->mem_ops = &ti_qspi_mem_ops;

	if (!of_property_read_u32(np, "num-cs", &num_cs))
		master->num_chipselect = num_cs;

	qspi = spi_master_get_devdata(master);
	qspi->master = master;
	qspi->dev = &pdev->dev;
	platform_set_drvdata(pdev, qspi);

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
	if (r == NULL) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (r == NULL) {
			dev_err(&pdev->dev, "missing platform data\n");
			ret = -ENODEV;
			goto free_master;
		}
	}

	res_mmap = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "qspi_mmap");
	if (res_mmap == NULL) {
		res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res_mmap == NULL) {
			dev_err(&pdev->dev,
				"memory mapped resource not provided\n");
		}
	}

	if (res_mmap)
		qspi->mmap_size = resource_size(res_mmap);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto free_master;
	}

	mutex_init(&qspi->list_lock);

	qspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(qspi->base)) {
		ret = PTR_ERR(qspi->base);
		goto free_master;
	}

	if (of_property_read_bool(np, "syscon-chipselects")) {
		qspi->ctrl_base =
			syscon_regmap_lookup_by_phandle(np,
							"syscon-chipselects");
		if (IS_ERR(qspi->ctrl_base)) {
			ret = PTR_ERR(qspi->ctrl_base);
			goto free_master;
		}
		ret = of_property_read_u32_index(np,
						 "syscon-chipselects",
						 1, &qspi->ctrl_reg);
		if (ret) {
			dev_err(&pdev->dev,
				"couldn't get ctrl_mod reg index\n");
			goto free_master;
		}
	}

	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(qspi->fclk)) {
		ret = PTR_ERR(qspi->fclk);
		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
	}

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
		master->max_speed_hz = max_freq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	qspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(qspi->rx_chan)) {
		dev_err(qspi->dev,
			"No Rx DMA available, trying mmap mode\n");
		qspi->rx_chan = NULL;
		ret = 0;
		goto no_dma;
	}
	qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
					      QSPI_DMA_BUFFER_SIZE,
					      &qspi->rx_bb_dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!qspi->rx_bb_addr) {
		dev_err(qspi->dev,
			"dma_alloc_coherent failed, using PIO mode\n");
		dma_release_channel(qspi->rx_chan);
		goto no_dma;
	}
	master->dma_rx = qspi->rx_chan;
	init_completion(&qspi->transfer_complete);
	if (res_mmap)
		qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;

no_dma:
	if (!qspi->rx_chan && res_mmap) {
		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
		if (IS_ERR(qspi->mmap_base)) {
			dev_info(&pdev->dev,
				 "mmap failed with error %ld using PIO mode\n",
				 PTR_ERR(qspi->mmap_base));
			qspi->mmap_base = NULL;
			master->mem_ops = NULL;
		}
	}
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (!ret)
		return 0;

	ti_qspi_dma_cleanup(qspi);

	pm_runtime_disable(&pdev->dev);
free_master:
	spi_master_put(master);
	return ret;
}

static int ti_qspi_remove(struct platform_device *pdev)
{
	struct ti_qspi *qspi = platform_get_drvdata(pdev);
	int rc;

	rc = spi_master_suspend(qspi->master);
	if (rc)
		return rc;

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	ti_qspi_dma_cleanup(qspi);

	return 0;
}

static const struct dev_pm_ops ti_qspi_pm_ops = {
	.runtime_resume = ti_qspi_runtime_resume,
};

static struct platform_driver ti_qspi_driver = {
	.probe	= ti_qspi_probe,
	.remove = ti_qspi_remove,
	.driver = {
		.name	= "ti-qspi",
		.pm = &ti_qspi_pm_ops,
		.of_match_table = ti_qspi_match,
	}
};

module_platform_driver(ti_qspi_driver);

MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");