2014-07-01 05:03:59 +04:00
/*
* Copyright ( c ) 2014 , Fuzhou Rockchip Electronics Co . , Ltd
2014-07-11 06:07:56 +04:00
* Author : Addy Ke < addy . ke @ rock - chips . com >
2014-07-01 05:03:59 +04:00
*
* This program is free software ; you can redistribute it and / or modify it
* under the terms and conditions of the GNU General Public License ,
* version 2 , as published by the Free Software Foundation .
*
* This program is distributed in the hope it will be useful , but WITHOUT
* ANY WARRANTY ; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE . See the GNU General Public License for
* more details .
*
*/
# include <linux/clk.h>
2016-03-10 09:51:48 +03:00
# include <linux/dmaengine.h>
# include <linux/module.h>
# include <linux/of.h>
2016-12-17 03:59:16 +03:00
# include <linux/pinctrl/consumer.h>
2014-07-01 05:03:59 +04:00
# include <linux/platform_device.h>
# include <linux/spi/spi.h>
# include <linux/pm_runtime.h>
2016-03-10 09:51:48 +03:00
# include <linux/scatterlist.h>
2014-07-01 05:03:59 +04:00
#define DRIVER_NAME "rockchip-spi"

/* Read-modify-write helpers for bit manipulation on controller registers */
#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
		writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
		writel_relaxed(readl_relaxed(reg) | (bits), reg)

/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0			0x0000
#define ROCKCHIP_SPI_CTRLR1			0x0004
#define ROCKCHIP_SPI_SSIENR			0x0008
#define ROCKCHIP_SPI_SER			0x000c
#define ROCKCHIP_SPI_BAUDR			0x0010
#define ROCKCHIP_SPI_TXFTLR			0x0014
#define ROCKCHIP_SPI_RXFTLR			0x0018
#define ROCKCHIP_SPI_TXFLR			0x001c
#define ROCKCHIP_SPI_RXFLR			0x0020
#define ROCKCHIP_SPI_SR				0x0024
#define ROCKCHIP_SPI_IPR			0x0028
#define ROCKCHIP_SPI_IMR			0x002c
#define ROCKCHIP_SPI_ISR			0x0030
#define ROCKCHIP_SPI_RISR			0x0034
#define ROCKCHIP_SPI_ICR			0x0038
#define ROCKCHIP_SPI_DMACR			0x003c
#define ROCKCHIP_SPI_DMATDLR			0x0040
#define ROCKCHIP_SPI_DMARDLR			0x0044
#define ROCKCHIP_SPI_TXDR			0x0400
#define ROCKCHIP_SPI_RXDR			0x0800

/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET				0
#define CR0_DFS_4BIT				0x0
#define CR0_DFS_8BIT				0x1
#define CR0_DFS_16BIT				0x2

#define CR0_CFS_OFFSET				2

#define CR0_SCPH_OFFSET				6

#define CR0_SCPOL_OFFSET			7

#define CR0_CSM_OFFSET				8
#define CR0_CSM_KEEP				0x0
/* ss_n be high for half sclk_out cycles */
#define CR0_CSM_HALF				0x1
/* ss_n be high for one sclk_out cycle */
#define CR0_CSM_ONE				0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET				10
/*
 * The period between ss_n active and
 * sclk_out active is half sclk_out cycles
 */
#define CR0_SSD_HALF				0x0
/*
 * The period between ss_n active and
 * sclk_out active is one sclk_out cycle
 */
#define CR0_SSD_ONE				0x1

#define CR0_EM_OFFSET				11
#define CR0_EM_LITTLE				0x0
#define CR0_EM_BIG				0x1

#define CR0_FBM_OFFSET				12
#define CR0_FBM_MSB				0x0
#define CR0_FBM_LSB				0x1

#define CR0_BHT_OFFSET				13
#define CR0_BHT_16BIT				0x0
#define CR0_BHT_8BIT				0x1

#define CR0_RSD_OFFSET				14
#define CR0_RSD_MAX				0x3

#define CR0_FRF_OFFSET				16
#define CR0_FRF_SPI				0x0
#define CR0_FRF_SSP				0x1
#define CR0_FRF_MICROWIRE			0x2

#define CR0_XFM_OFFSET				18
/* NOTE: fixed to use CR0_XFM_OFFSET; previously referenced the
 * undefined symbol SPI_XFM_OFFSET and would not compile if used.
 */
#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR				0x0
#define CR0_XFM_TO				0x1
#define CR0_XFM_RO				0x2

#define CR0_OPM_OFFSET				20
#define CR0_OPM_MASTER				0x0
#define CR0_OPM_SLAVE				0x1

#define CR0_MTM_OFFSET				0x21

/* Bit fields in SER, 2bit */
#define SER_MASK				0x3

/* Bit fields in BAUDR */
#define BAUDR_SCKDV_MIN				2
#define BAUDR_SCKDV_MAX				65534

/* Bit fields in SR, 5bit */
#define SR_MASK					0x1f
#define SR_BUSY					(1 << 0)
#define SR_TF_FULL				(1 << 1)
#define SR_TF_EMPTY				(1 << 2)
#define SR_RF_EMPTY				(1 << 3)
#define SR_RF_FULL				(1 << 4)

/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK				0x1f
#define INT_TF_EMPTY				(1 << 0)
#define INT_TF_OVERFLOW				(1 << 1)
#define INT_RF_UNDERFLOW			(1 << 2)
#define INT_RF_OVERFLOW				(1 << 3)
#define INT_RF_FULL				(1 << 4)

/* Bit fields in ICR, 4bit */
#define ICR_MASK				0x0f
#define ICR_ALL					(1 << 0)
#define ICR_RF_UNDERFLOW			(1 << 1)
#define ICR_RF_OVERFLOW				(1 << 2)
#define ICR_TF_OVERFLOW				(1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN				(1 << 0)
#define TF_DMA_EN				(1 << 1)

/* Driver state flags */
#define RXDMA					(1 << 0)
#define TXDMA					(1 << 1)

/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
#define MAX_SCLK_OUT				50000000U

/*
 * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
 * the controller seems to hang when given 0x10000, so stick with this for now.
 */
#define ROCKCHIP_SPI_MAX_TRANLEN		0xffff

#define ROCKCHIP_SPI_MAX_CS_NUM			2
2014-07-01 05:03:59 +04:00
/* Per-controller driver state, stored as spi_master driver data. */
struct rockchip_spi {
	struct device *dev;

	struct clk *spiclk;	/* SPI function clock ("spiclk") */
	struct clk *apb_pclk;	/* APB bus clock for register access */

	void __iomem *regs;	/* mapped controller registers */

	dma_addr_t dma_addr_rx;	/* bus address of RXDR, for slave DMA config */
	dma_addr_t dma_addr_tx;	/* bus address of TXDR, for slave DMA config */

	/* PIO bookkeeping for the current transfer (irq mode) */
	const void *tx;		/* next word to write, or NULL for rx-only */
	void *rx;		/* next slot to fill, or NULL to discard rx */
	unsigned int tx_left;	/* SPI words still to be written */
	unsigned int rx_left;	/* SPI words still to be read */

	atomic_t state;		/* RXDMA/TXDMA flags for in-flight DMA */

	/*depth of the FIFO buffer */
	u32 fifo_len;
	/* frequency of spiclk */
	u32 freq;

	u8 n_bytes;		/* bytes per SPI word: 1 or 2 */
	u8 rsd;			/* rx sample delay in spiclk cycles (0..CR0_RSD_MAX) */

	/* tracks asserted state per chip select, to skip no-op set_cs calls */
	bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
};
2018-10-31 13:56:58 +03:00
/* Enable or disable the controller via SSIENR (disabling also flushes
 * both the rx and tx fifos).
 */
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
{
	u32 val = enable ? 1U : 0U;

	writel_relaxed(val, rs->regs + ROCKCHIP_SPI_SSIENR);
}
2014-07-11 06:08:24 +04:00
static inline void wait_for_idle ( struct rockchip_spi * rs )
{
unsigned long timeout = jiffies + msecs_to_jiffies ( 5 ) ;
do {
if ( ! ( readl_relaxed ( rs - > regs + ROCKCHIP_SPI_SR ) & SR_BUSY ) )
return ;
2014-09-04 00:44:25 +04:00
} while ( ! time_after ( jiffies , timeout ) ) ;
2014-07-11 06:08:24 +04:00
dev_warn ( rs - > dev , " spi controller is in busy state! \n " ) ;
}
2014-07-01 05:03:59 +04:00
static u32 get_fifo_len ( struct rockchip_spi * rs )
{
u32 fifo ;
for ( fifo = 2 ; fifo < 32 ; fifo + + ) {
writel_relaxed ( fifo , rs - > regs + ROCKCHIP_SPI_TXFTLR ) ;
if ( fifo ! = readl_relaxed ( rs - > regs + ROCKCHIP_SPI_TXFTLR ) )
break ;
}
writel_relaxed ( 0 , rs - > regs + ROCKCHIP_SPI_TXFTLR ) ;
return ( fifo = = 31 ) ? 0 : fifo ;
}
/* Assert or deassert a chip select line via the SER register.  A runtime
 * PM reference is held for the whole time CS stays asserted.
 */
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct spi_master *master = spi->master;
	struct rockchip_spi *rs = spi_master_get_devdata(master);
	u8 cs = spi->chip_select;
	bool cs_asserted = !enable;

	/* nothing to do if the line is already in the requested state */
	if (rs->cs_asserted[cs] == cs_asserted)
		return;

	if (cs_asserted) {
		/* Keep things powered as long as CS is asserted */
		pm_runtime_get_sync(rs->dev);

		ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(cs));
	} else {
		ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(cs));

		/* Drop reference from when we first asserted CS */
		pm_runtime_put(rs->dev);
	}

	rs->cs_asserted[cs] = cs_asserted;
}
2015-02-27 18:34:16 +03:00
static void rockchip_spi_handle_err ( struct spi_master * master ,
struct spi_message * msg )
2014-07-01 05:03:59 +04:00
{
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2018-10-31 13:57:02 +03:00
/* stop running spi transfer
* this also flushes both rx and tx fifos
2014-07-11 06:07:56 +04:00
*/
2018-10-31 13:57:02 +03:00
spi_enable_chip ( rs , false ) ;
2018-10-31 13:57:09 +03:00
/* make sure all interrupts are masked */
writel_relaxed ( 0 , rs - > regs + ROCKCHIP_SPI_IMR ) ;
2018-10-31 13:57:01 +03:00
if ( atomic_read ( & rs - > state ) & TXDMA )
2018-10-31 13:57:04 +03:00
dmaengine_terminate_async ( master - > dma_tx ) ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:02 +03:00
if ( atomic_read ( & rs - > state ) & RXDMA )
2018-10-31 13:57:04 +03:00
dmaengine_terminate_async ( master - > dma_rx ) ;
2014-07-01 05:03:59 +04:00
}
static void rockchip_spi_pio_writer ( struct rockchip_spi * rs )
{
2018-10-31 13:57:09 +03:00
u32 tx_free = rs - > fifo_len - readl_relaxed ( rs - > regs + ROCKCHIP_SPI_TXFLR ) ;
u32 words = min ( rs - > tx_left , tx_free ) ;
rs - > tx_left - = words ;
for ( ; words ; words - - ) {
u32 txw ;
2014-07-01 05:03:59 +04:00
if ( rs - > n_bytes = = 1 )
2018-10-31 13:57:09 +03:00
txw = * ( u8 * ) rs - > tx ;
2014-07-01 05:03:59 +04:00
else
2018-10-31 13:57:09 +03:00
txw = * ( u16 * ) rs - > tx ;
2014-07-01 05:03:59 +04:00
writel_relaxed ( txw , rs - > regs + ROCKCHIP_SPI_TXDR ) ;
rs - > tx + = rs - > n_bytes ;
}
}
static void rockchip_spi_pio_reader ( struct rockchip_spi * rs )
{
2018-10-31 13:57:09 +03:00
u32 words = readl_relaxed ( rs - > regs + ROCKCHIP_SPI_RXFLR ) ;
u32 rx_left = rs - > rx_left - words ;
/* the hardware doesn't allow us to change fifo threshold
* level while spi is enabled , so instead make sure to leave
* enough words in the rx fifo to get the last interrupt
* exactly when all words have been received
*/
if ( rx_left ) {
u32 ftl = readl_relaxed ( rs - > regs + ROCKCHIP_SPI_RXFTLR ) + 1 ;
if ( rx_left < ftl ) {
rx_left = ftl ;
words = rs - > rx_left - rx_left ;
}
}
rs - > rx_left = rx_left ;
for ( ; words ; words - - ) {
u32 rxw = readl_relaxed ( rs - > regs + ROCKCHIP_SPI_RXDR ) ;
if ( ! rs - > rx )
continue ;
2014-07-01 05:03:59 +04:00
if ( rs - > n_bytes = = 1 )
2018-10-31 13:57:09 +03:00
* ( u8 * ) rs - > rx = ( u8 ) rxw ;
2014-07-01 05:03:59 +04:00
else
2018-10-31 13:57:09 +03:00
* ( u16 * ) rs - > rx = ( u16 ) rxw ;
2014-07-01 05:03:59 +04:00
rs - > rx + = rs - > n_bytes ;
2014-07-11 06:07:56 +04:00
}
2014-07-01 05:03:59 +04:00
}
2018-10-31 13:57:09 +03:00
static irqreturn_t rockchip_spi_isr ( int irq , void * dev_id )
2014-07-01 05:03:59 +04:00
{
2018-10-31 13:57:09 +03:00
struct spi_master * master = dev_id ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:09 +03:00
if ( rs - > tx_left )
rockchip_spi_pio_writer ( rs ) ;
2018-10-10 12:00:38 +03:00
2018-10-31 13:57:09 +03:00
rockchip_spi_pio_reader ( rs ) ;
if ( ! rs - > rx_left ) {
spi_enable_chip ( rs , false ) ;
writel_relaxed ( 0 , rs - > regs + ROCKCHIP_SPI_IMR ) ;
spi_finalize_current_transfer ( master ) ;
}
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:09 +03:00
return IRQ_HANDLED ;
}
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:09 +03:00
static int rockchip_spi_prepare_irq ( struct rockchip_spi * rs ,
struct spi_transfer * xfer )
{
rs - > tx = xfer - > tx_buf ;
rs - > rx = xfer - > rx_buf ;
rs - > tx_left = rs - > tx ? xfer - > len / rs - > n_bytes : 0 ;
rs - > rx_left = xfer - > len / rs - > n_bytes ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:09 +03:00
writel_relaxed ( INT_RF_FULL , rs - > regs + ROCKCHIP_SPI_IMR ) ;
spi_enable_chip ( rs , true ) ;
2014-07-11 06:08:24 +04:00
2018-10-31 13:57:09 +03:00
if ( rs - > tx_left )
rockchip_spi_pio_writer ( rs ) ;
2014-10-15 15:26:18 +04:00
2018-10-31 13:57:09 +03:00
/* 1 means the transfer is in progress */
return 1 ;
2014-07-01 05:03:59 +04:00
}
static void rockchip_spi_dma_rxcb ( void * data )
{
2018-10-31 13:57:05 +03:00
struct spi_master * master = data ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2018-10-31 13:57:01 +03:00
int state = atomic_fetch_andnot ( RXDMA , & rs - > state ) ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:01 +03:00
if ( state & TXDMA )
return ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:01 +03:00
spi_enable_chip ( rs , false ) ;
2018-10-31 13:57:05 +03:00
spi_finalize_current_transfer ( master ) ;
2014-07-01 05:03:59 +04:00
}
static void rockchip_spi_dma_txcb ( void * data )
{
2018-10-31 13:57:05 +03:00
struct spi_master * master = data ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2018-10-31 13:57:01 +03:00
int state = atomic_fetch_andnot ( TXDMA , & rs - > state ) ;
if ( state & RXDMA )
return ;
2014-07-01 05:03:59 +04:00
2014-07-11 06:08:24 +04:00
/* Wait until the FIFO data completely. */
wait_for_idle ( rs ) ;
2018-10-31 13:57:01 +03:00
spi_enable_chip ( rs , false ) ;
2018-10-31 13:57:05 +03:00
spi_finalize_current_transfer ( master ) ;
2014-07-01 05:03:59 +04:00
}
2018-10-31 13:57:03 +03:00
/* Configure and submit DMA descriptors for the transfer directions that
 * have buffers.  Returns 1 (transfer in flight) on success, -EINVAL if a
 * descriptor could not be prepared.  rx is issued before the controller
 * is enabled and before tx, so no incoming word can be dropped.
 */
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
		struct spi_master *master, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	/* clear RXDMA/TXDMA flags from any previous transfer */
	atomic_set(&rs->state, 0);

	rxdesc = NULL;
	if (xfer->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = rs->dma_addr_rx,
			.src_addr_width = rs->n_bytes,
			.src_maxburst = 1,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				master->dma_rx,
				xfer->rx_sg.sgl, xfer->rx_sg.nents,
				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = master;
	}

	txdesc = NULL;
	if (xfer->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = rs->dma_addr_tx,
			.dst_addr_width = rs->n_bytes,
			/* refill in half-fifo chunks, matching DMATDLR */
			.dst_maxburst = rs->fifo_len / 2,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				master->dma_tx,
				xfer->tx_sg.sgl, xfer->tx_sg.nents,
				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!txdesc) {
			/* undo the rx descriptor we already prepared */
			if (rxdesc)
				dmaengine_terminate_sync(master->dma_rx);
			return -EINVAL;
		}

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = master;
	}

	/* rx must be started before tx due to spi instinct */
	if (rxdesc) {
		atomic_or(RXDMA, &rs->state);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	spi_enable_chip(rs, true);

	if (txdesc) {
		atomic_or(TXDMA, &rs->state);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	/* 1 means the transfer is in progress */
	return 1;
}
2018-10-31 13:57:03 +03:00
/* Program CTRLR0/CTRLR1, fifo thresholds, DMA control and the baud-rate
 * divisor for the upcoming transfer.  Must be called with the controller
 * disabled (registers are not writable while SSIENR is set).
 */
static void rockchip_spi_config(struct rockchip_spi *rs,
		struct spi_device *spi, struct spi_transfer *xfer,
		bool use_dma)
{
	/* fixed settings: Motorola SPI frame format, 8-bit halt trigger,
	 * one-cycle ss_n-to-sclk delay, big-endian fifo access
	 */
	u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
		| CR0_BHT_8BIT << CR0_BHT_OFFSET
		| CR0_SSD_ONE << CR0_SSD_OFFSET
		| CR0_EM_BIG << CR0_EM_OFFSET;
	u32 cr1;
	u32 dmacr = 0;

	cr0 |= rs->rsd << CR0_RSD_OFFSET;
	/* SPI_CPHA/SPI_CPOL map directly onto the SCPH/SCPOL field pair */
	cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
	if (spi->mode & SPI_LSB_FIRST)
		cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;

	/* transfer mode: duplex, rx-only, or (DMA only) tx-only */
	if (xfer->rx_buf && xfer->tx_buf)
		cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
	else if (xfer->rx_buf)
		cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
	else if (use_dma)
		cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;

	/* CTRLR1 holds the transfer length in frames, minus one */
	switch (xfer->bits_per_word) {
	case 4:
		cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
		cr1 = xfer->len - 1;
		break;
	case 8:
		cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
		cr1 = xfer->len - 1;
		break;
	case 16:
		cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
		cr1 = xfer->len / 2 - 1;
		break;
	default:
		/* we only whitelist 4, 8 and 16 bit words in
		 * master->bits_per_word_mask, so this shouldn't
		 * happen
		 */
		unreachable();
	}

	if (use_dma) {
		if (xfer->tx_buf)
			dmacr |= TF_DMA_EN;
		if (xfer->rx_buf)
			dmacr |= RF_DMA_EN;
	}

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
	writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);

	/* unfortunately setting the fifo threshold level to generate an
	 * interrupt exactly when the fifo is full doesn't seem to work,
	 * so we need the strict inequality here
	 */
	if (xfer->len < rs->fifo_len)
		writel_relaxed(xfer->len - 1, rs->regs + ROCKCHIP_SPI_RXFTLR)
;
	else
		writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	/* the hardware only supports an even clock divisor, so
	 * round divisor = spiclk / speed up to nearest even number
	 * so that the resulting speed is <= the requested speed
	 */
	writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
		       rs->regs + ROCKCHIP_SPI_BAUDR);
}
2016-07-15 04:30:59 +03:00
/* Upper bound for a single transfer, limited by the 16-bit CTRLR1
 * frame counter (see ROCKCHIP_SPI_MAX_TRANLEN).
 */
static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
{
	return ROCKCHIP_SPI_MAX_TRANLEN;
}
2014-07-11 06:07:56 +04:00
static int rockchip_spi_transfer_one (
struct spi_master * master ,
2014-07-01 05:03:59 +04:00
struct spi_device * spi ,
struct spi_transfer * xfer )
{
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2018-10-31 13:57:06 +03:00
bool use_dma ;
2014-07-01 05:03:59 +04:00
2014-09-04 00:44:26 +04:00
WARN_ON ( readl_relaxed ( rs - > regs + ROCKCHIP_SPI_SSIENR ) & &
( readl_relaxed ( rs - > regs + ROCKCHIP_SPI_SR ) & SR_BUSY ) ) ;
2014-07-01 05:03:59 +04:00
if ( ! xfer - > tx_buf & & ! xfer - > rx_buf ) {
dev_err ( rs - > dev , " No buffer for transfer \n " ) ;
return - EINVAL ;
}
2016-07-15 04:30:59 +03:00
if ( xfer - > len > ROCKCHIP_SPI_MAX_TRANLEN ) {
dev_err ( rs - > dev , " Transfer is too long (%d) \n " , xfer - > len ) ;
return - EINVAL ;
}
2018-10-31 13:57:10 +03:00
rs - > n_bytes = xfer - > bits_per_word < = 8 ? 1 : 2 ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:06 +03:00
use_dma = master - > can_dma ? master - > can_dma ( master , spi , xfer ) : false ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:06 +03:00
rockchip_spi_config ( rs , spi , xfer , use_dma ) ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:06 +03:00
if ( use_dma )
2018-10-31 13:57:04 +03:00
return rockchip_spi_prepare_dma ( rs , master , xfer ) ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:09 +03:00
return rockchip_spi_prepare_irq ( rs , xfer ) ;
2014-07-01 05:03:59 +04:00
}
static bool rockchip_spi_can_dma ( struct spi_master * master ,
2014-07-11 06:07:56 +04:00
struct spi_device * spi ,
struct spi_transfer * xfer )
2014-07-01 05:03:59 +04:00
{
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2018-10-31 13:57:09 +03:00
unsigned int bytes_per_word = xfer - > bits_per_word < = 8 ? 1 : 2 ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:09 +03:00
/* if the numbor of spi words to transfer is less than the fifo
* length we can just fill the fifo and wait for a single irq ,
* so don ' t bother setting up dma
*/
return xfer - > len / bytes_per_word > = rs - > fifo_len ;
2014-07-01 05:03:59 +04:00
}
/* Probe: map registers, enable clocks, request the irq, compute the rx
 * sample delay, probe the fifo depth, configure the spi_master and
 * optionally set up DMA channels before registering the controller.
 * All failure paths unwind in reverse order via the goto chain.
 */
static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret;
	struct rockchip_spi *rs;
	struct spi_master *master;
	struct resource *mem;
	u32 rsd_nsecs;

	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	rs = spi_master_get_devdata(master);

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rs->regs)) {
		ret = PTR_ERR(rs->regs);
		goto err_put_master;
	}

	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
		ret = PTR_ERR(rs->apb_pclk);
		goto err_put_master;
	}

	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
		ret = PTR_ERR(rs->spiclk);
		goto err_put_master;
	}

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
		goto err_put_master;
	}

	ret = clk_prepare_enable(rs->spiclk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
		goto err_disable_apbclk;
	}

	/* make sure the controller starts out disabled */
	spi_enable_chip(rs, false);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_disable_spiclk;

	/* threaded irq: the PIO handler runs in process context */
	ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
			IRQF_ONESHOT, dev_name(&pdev->dev), master);
	if (ret)
		goto err_disable_spiclk;

	rs->dev = &pdev->dev;
	rs->freq = clk_get_rate(rs->spiclk);

	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
				  &rsd_nsecs)) {
		/* rx sample delay is expressed in parent clock cycles (max 3) */
		u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
				1000000000 >> 8);
		if (!rsd) {
			dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
					rs->freq, rsd_nsecs);
		} else if (rsd > CR0_RSD_MAX) {
			rsd = CR0_RSD_MAX;
			dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
					rs->freq, rsd_nsecs,
					CR0_RSD_MAX * 1000000000U / rs->freq);
		}
		rs->rsd = rsd;
	}

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		dev_err(&pdev->dev, "Failed to get fifo length\n");
		ret = -EINVAL;
		goto err_disable_spiclk;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
	master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
	/* speed limits follow from the even divisor range and the IP's cap */
	master->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
	master->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);

	master->set_cs = rockchip_spi_set_cs;
	master->transfer_one = rockchip_spi_transfer_one;
	master->max_transfer_size = rockchip_spi_max_transfer_size;
	master->handle_err = rockchip_spi_handle_err;
	master->flags = SPI_MASTER_GPIO_SS;

	master->dma_tx = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		/* Check tx to see if we need defer probing driver */
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_disable_pm_runtime;
		}
		/* DMA is optional: fall back to PIO on any other error */
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
		master->dma_tx = NULL;
	}

	master->dma_rx = dma_request_chan(rs->dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_free_dma_tx;
		}
		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
		master->dma_rx = NULL;
	}

	/* DMA is only used when both directions are available */
	if (master->dma_tx && master->dma_rx) {
		rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
		rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
		master->can_dma = rockchip_spi_can_dma;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register master\n");
		goto err_free_dma_rx;
	}

	return 0;

err_free_dma_rx:
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);
err_free_dma_tx:
	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
err_disable_pm_runtime:
	pm_runtime_disable(&pdev->dev);
err_disable_spiclk:
	clk_disable_unprepare(rs->spiclk);
err_disable_apbclk:
	clk_disable_unprepare(rs->apb_pclk);
err_put_master:
	spi_master_put(master);

	return ret;
}
static int rockchip_spi_remove ( struct platform_device * pdev )
{
struct spi_master * master = spi_master_get ( platform_get_drvdata ( pdev ) ) ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2017-08-07 15:40:19 +03:00
pm_runtime_get_sync ( & pdev - > dev ) ;
2014-07-01 05:03:59 +04:00
clk_disable_unprepare ( rs - > spiclk ) ;
clk_disable_unprepare ( rs - > apb_pclk ) ;
2017-08-07 15:40:19 +03:00
pm_runtime_put_noidle ( & pdev - > dev ) ;
pm_runtime_disable ( & pdev - > dev ) ;
pm_runtime_set_suspended ( & pdev - > dev ) ;
2018-10-31 13:57:04 +03:00
if ( master - > dma_tx )
dma_release_channel ( master - > dma_tx ) ;
if ( master - > dma_rx )
dma_release_channel ( master - > dma_rx ) ;
2014-07-01 05:03:59 +04:00
2016-02-15 11:28:12 +03:00
spi_master_put ( master ) ;
2014-07-01 05:03:59 +04:00
return 0 ;
}
# ifdef CONFIG_PM_SLEEP
/* System sleep: quiesce the spi core, force a runtime suspend, then
 * switch the pins to their sleep state.
 */
static int rockchip_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret < 0)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
static int rockchip_spi_resume ( struct device * dev )
{
2017-08-07 15:40:18 +03:00
int ret ;
2014-07-01 05:03:59 +04:00
struct spi_master * master = dev_get_drvdata ( dev ) ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
2016-12-17 03:59:16 +03:00
pinctrl_pm_select_default_state ( dev ) ;
2017-08-07 15:40:20 +03:00
ret = pm_runtime_force_resume ( dev ) ;
if ( ret < 0 )
return ret ;
2014-07-01 05:03:59 +04:00
2018-10-31 13:57:05 +03:00
ret = spi_master_resume ( master ) ;
2014-07-01 05:03:59 +04:00
if ( ret < 0 ) {
clk_disable_unprepare ( rs - > spiclk ) ;
clk_disable_unprepare ( rs - > apb_pclk ) ;
}
2017-08-07 15:40:18 +03:00
return 0 ;
2014-07-01 05:03:59 +04:00
}
# endif /* CONFIG_PM_SLEEP */
2014-12-13 02:41:15 +03:00
# ifdef CONFIG_PM
2014-07-01 05:03:59 +04:00
static int rockchip_spi_runtime_suspend ( struct device * dev )
{
struct spi_master * master = dev_get_drvdata ( dev ) ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
clk_disable_unprepare ( rs - > spiclk ) ;
clk_disable_unprepare ( rs - > apb_pclk ) ;
return 0 ;
}
static int rockchip_spi_runtime_resume ( struct device * dev )
{
int ret ;
struct spi_master * master = dev_get_drvdata ( dev ) ;
struct rockchip_spi * rs = spi_master_get_devdata ( master ) ;
ret = clk_prepare_enable ( rs - > apb_pclk ) ;
2017-08-07 15:40:18 +03:00
if ( ret < 0 )
2014-07-01 05:03:59 +04:00
return ret ;
ret = clk_prepare_enable ( rs - > spiclk ) ;
2017-08-07 15:40:18 +03:00
if ( ret < 0 )
2014-07-01 05:03:59 +04:00
clk_disable_unprepare ( rs - > apb_pclk ) ;
2017-08-07 15:40:18 +03:00
return 0 ;
2014-07-01 05:03:59 +04:00
}
2014-12-13 02:41:15 +03:00
# endif /* CONFIG_PM */
2014-07-01 05:03:59 +04:00
/* System sleep and runtime PM callbacks for the platform driver. */
static const struct dev_pm_ops rockchip_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
			   rockchip_spi_runtime_resume, NULL)
};
/* Rockchip SoCs whose SPI controller this driver supports. */
static const struct of_device_id rockchip_spi_dt_match[] = {
	{ .compatible = "rockchip,rv1108-spi", },
	{ .compatible = "rockchip,rk3036-spi", },
	{ .compatible = "rockchip,rk3066-spi", },
	{ .compatible = "rockchip,rk3188-spi", },
	{ .compatible = "rockchip,rk3228-spi", },
	{ .compatible = "rockchip,rk3288-spi", },
	{ .compatible = "rockchip,rk3368-spi", },
	{ .compatible = "rockchip,rk3399-spi", },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
/* Platform driver glue and module metadata. */
static struct platform_driver rockchip_spi_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.pm = &rockchip_spi_pm,
		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
	},
	.probe = rockchip_spi_probe,
	.remove = rockchip_spi_remove,
};

module_platform_driver(rockchip_spi_driver);

MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");