// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012 - 2014 Allwinner Tech
 * Pan Nan <pannan@allwinnertech.com>
 *
 * Copyright (C) 2014 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/dmaengine.h>
#include <linux/spi/spi.h>
#define SUN6I_AUTOSUSPEND_TIMEOUT		2000

#define SUN6I_FIFO_DEPTH			128
#define SUN8I_FIFO_DEPTH			64

#define SUN6I_GBL_CTL_REG			0x04
#define SUN6I_GBL_CTL_BUS_ENABLE		BIT(0)
#define SUN6I_GBL_CTL_MASTER			BIT(1)
#define SUN6I_GBL_CTL_TP			BIT(7)
#define SUN6I_GBL_CTL_RST			BIT(31)

#define SUN6I_TFR_CTL_REG			0x08
#define SUN6I_TFR_CTL_CPHA			BIT(0)
#define SUN6I_TFR_CTL_CPOL			BIT(1)
#define SUN6I_TFR_CTL_SPOL			BIT(2)
#define SUN6I_TFR_CTL_CS_MASK			0x30
#define SUN6I_TFR_CTL_CS(cs)			(((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
#define SUN6I_TFR_CTL_CS_MANUAL			BIT(6)
#define SUN6I_TFR_CTL_CS_LEVEL			BIT(7)
#define SUN6I_TFR_CTL_DHB			BIT(8)
#define SUN6I_TFR_CTL_FBS			BIT(12)
#define SUN6I_TFR_CTL_XCH			BIT(31)

#define SUN6I_INT_CTL_REG			0x10
#define SUN6I_INT_CTL_RF_RDY			BIT(0)
#define SUN6I_INT_CTL_TF_ERQ			BIT(4)
#define SUN6I_INT_CTL_RF_OVF			BIT(8)
#define SUN6I_INT_CTL_TC			BIT(12)

#define SUN6I_INT_STA_REG			0x14

#define SUN6I_FIFO_CTL_REG			0x18
#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_MASK	0xff
#define SUN6I_FIFO_CTL_RF_DRQ_EN		BIT(8)
#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_BITS	0
#define SUN6I_FIFO_CTL_RF_RST			BIT(15)
#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_MASK	0xff
#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_BITS	16
#define SUN6I_FIFO_CTL_TF_DRQ_EN		BIT(24)
#define SUN6I_FIFO_CTL_TF_RST			BIT(31)

#define SUN6I_FIFO_STA_REG			0x1c
#define SUN6I_FIFO_STA_RF_CNT_MASK		GENMASK(7, 0)
#define SUN6I_FIFO_STA_TF_CNT_MASK		GENMASK(23, 16)

#define SUN6I_CLK_CTL_REG			0x24
#define SUN6I_CLK_CTL_CDR2_MASK			0xff
#define SUN6I_CLK_CTL_CDR2(div)			(((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
#define SUN6I_CLK_CTL_CDR1_MASK			0xf
#define SUN6I_CLK_CTL_CDR1(div)			(((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
#define SUN6I_CLK_CTL_DRS			BIT(12)

#define SUN6I_MAX_XFER_SIZE			0xffffff

#define SUN6I_BURST_CNT_REG			0x30

#define SUN6I_XMIT_CNT_REG			0x34

#define SUN6I_BURST_CTL_CNT_REG			0x38

#define SUN6I_TXDATA_REG			0x200
#define SUN6I_RXDATA_REG			0x300
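
/*
 * Per-controller driver state. tx_buf, rx_buf and len track the current
 * position of a PIO transfer, fifo_depth holds the per-variant FIFO size
 * (128 bytes on the A31, 64 bytes on the H3), and dma_addr_rx/dma_addr_tx
 * are the bus addresses of the RX/TX data registers used for DMA.
 */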
struct sun6i_spi {
	struct spi_master	*master;
	void __iomem		*base_addr;
	dma_addr_t		dma_addr_rx;
	dma_addr_t		dma_addr_tx;

	struct clk		*hclk;
	struct clk		*mclk;
	struct reset_control	*rstc;

	struct completion	done;

	const u8		*tx_buf;
	u8			*rx_buf;
	int			len;
	unsigned long		fifo_depth;
};
static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
{
	return readl(sspi->base_addr + reg);
}

static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
{
	writel(value, sspi->base_addr + reg);
}

static inline u32 sun6i_spi_get_rx_fifo_count(struct sun6i_spi *sspi)
{
	u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);

	return FIELD_GET(SUN6I_FIFO_STA_RF_CNT_MASK, reg);
}

static inline u32 sun6i_spi_get_tx_fifo_count(struct sun6i_spi *sspi)
{
	u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);

	return FIELD_GET(SUN6I_FIFO_STA_TF_CNT_MASK, reg);
}

static inline void sun6i_spi_disable_interrupt(struct sun6i_spi *sspi, u32 mask)
{
	u32 reg = sun6i_spi_read(sspi, SUN6I_INT_CTL_REG);

	reg &= ~mask;
	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
}

static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi)
{
	u32 len;
	u8 byte;

	/* See how much data is available */
	len = sun6i_spi_get_rx_fifo_count(sspi);

	while (len--) {
		byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
		if (sspi->rx_buf)
			*sspi->rx_buf++ = byte;
	}
}

static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
{
	u32 cnt;
	int len;
	u8 byte;

	/* See how much data we can fit */
	cnt = sspi->fifo_depth - sun6i_spi_get_tx_fifo_count(sspi);

	len = min((int)cnt, sspi->len);

	while (len--) {
		byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
		writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
		sspi->len--;
	}
}
static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
	u32 reg;

	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
	reg &= ~SUN6I_TFR_CTL_CS_MASK;
	reg |= SUN6I_TFR_CTL_CS(spi->chip_select);

	if (enable)
		reg |= SUN6I_TFR_CTL_CS_LEVEL;
	else
		reg &= ~SUN6I_TFR_CTL_CS_LEVEL;

	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
}
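
/*
 * The hardware burst counters are 24 bits wide, so cap what the SPI core
 * will ask for in a single transfer just below SUN6I_MAX_XFER_SIZE.
 */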
static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
{
	return SUN6I_MAX_XFER_SIZE - 1;
}
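
/*
 * Set up and kick off dmaengine transfers targeting the RX and/or TX data
 * registers, for whichever halves of the transfer actually have a buffer
 * attached. If preparing the TX descriptor fails, any already-prepared RX
 * descriptor is terminated so nothing is left pending.
 */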
static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
				 struct spi_transfer *tfr)
{
	struct dma_async_tx_descriptor *rxdesc, *txdesc;
	struct spi_master *master = sspi->master;

	rxdesc = NULL;
	if (tfr->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = sspi->dma_addr_rx,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst = 8,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(master->dma_rx,
						 tfr->rx_sg.sgl,
						 tfr->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;
	}

	txdesc = NULL;
	if (tfr->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = sspi->dma_addr_tx,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst = 8,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(master->dma_tx,
						 tfr->tx_sg.sgl,
						 tfr->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc) {
			if (rxdesc)
				dmaengine_terminate_sync(master->dma_rx);
			return -EINVAL;
		}
	}

	if (tfr->rx_buf) {
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	if (tfr->tx_buf) {
		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	return 0;
}
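
/*
 * Run a single transfer: reset the FIFOs, program the trigger levels and
 * (optionally) the DMA requests, set the transfer mode bits, pick a clock
 * divider, program the burst counters, then either pre-fill the TX FIFO
 * or hand the buffers to dmaengine before starting the exchange and
 * waiting for the transfer-complete interrupt.
 */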
static int sun6i_spi_transfer_one(struct spi_master *master,
				  struct spi_device *spi,
				  struct spi_transfer *tfr)
{
	struct sun6i_spi *sspi = spi_master_get_devdata(master);
	unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
	unsigned int start, end, tx_time;
	unsigned int trig_level;
	unsigned int tx_len = 0, rx_len = 0;
	bool use_dma;
	int ret = 0;
	u32 reg;

	if (tfr->len > SUN6I_MAX_XFER_SIZE)
		return -EINVAL;

	reinit_completion(&sspi->done);
	sspi->tx_buf = tfr->tx_buf;
	sspi->rx_buf = tfr->rx_buf;
	sspi->len = tfr->len;
	use_dma = master->can_dma ? master->can_dma(master, spi, tfr) : false;

	/* Clear pending interrupts */
	sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);

	/* Reset FIFO */
	sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
			SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);

	reg = 0;

	if (!use_dma) {
		/*
		 * Setup FIFO interrupt trigger level
		 * Here we choose 3/4 of the full fifo depth, as it's
		 * the hardcoded value used in the old generation of
		 * Allwinner SPI controllers. (See spi-sun4i.c)
		 */
		trig_level = sspi->fifo_depth / 4 * 3;
	} else {
		/*
		 * Setup FIFO DMA request trigger level
		 * We choose 1/2 of the full fifo depth; that value will
		 * be used as the DMA burst length.
		 */
		trig_level = sspi->fifo_depth / 2;

		if (tfr->tx_buf)
			reg |= SUN6I_FIFO_CTL_TF_DRQ_EN;
		if (tfr->rx_buf)
			reg |= SUN6I_FIFO_CTL_RF_DRQ_EN;
	}
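
	/*
	 * With the 128-byte A31 FIFO, for example, this puts the interrupt
	 * trigger at 96 bytes in PIO mode and the DMA burst length at
	 * 64 bytes in DMA mode.
	 */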
	reg |= (trig_level << SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_BITS) |
	       (trig_level << SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_BITS);

	sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG, reg);

	/*
	 * Setup the transfer control register: Chip Select,
	 * polarities, etc.
	 */
	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);

	if (spi->mode & SPI_CPOL)
		reg |= SUN6I_TFR_CTL_CPOL;
	else
		reg &= ~SUN6I_TFR_CTL_CPOL;

	if (spi->mode & SPI_CPHA)
		reg |= SUN6I_TFR_CTL_CPHA;
	else
		reg &= ~SUN6I_TFR_CTL_CPHA;

	if (spi->mode & SPI_LSB_FIRST)
		reg |= SUN6I_TFR_CTL_FBS;
	else
		reg &= ~SUN6I_TFR_CTL_FBS;

	/*
	 * If it's a TX only transfer, we don't want to fill the RX
	 * FIFO with bogus data
	 */
	if (sspi->rx_buf) {
		reg &= ~SUN6I_TFR_CTL_DHB;
		rx_len = tfr->len;
	} else {
		reg |= SUN6I_TFR_CTL_DHB;
	}

	/* We want to control the chip select manually */
	reg |= SUN6I_TFR_CTL_CS_MANUAL;

	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);

	/* Ensure that we have a parent clock fast enough */
	mclk_rate = clk_get_rate(sspi->mclk);
	if (mclk_rate < (2 * tfr->speed_hz)) {
		clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
		mclk_rate = clk_get_rate(sspi->mclk);
	}

	/*
	 * Setup clock divider.
	 *
	 * We have two choices there. Either we can use the clock
	 * divide rate 1, which is calculated thanks to this formula:
	 * SPI_CLK = MOD_CLK / (2 ^ cdr)
	 * Or we can use CDR2, which is calculated with the formula:
	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
	 * Whether we use the former or the latter is set through the
	 * DRS bit.
	 *
	 * First try CDR2, and if we can't reach the expected
	 * frequency, fall back to CDR1.
	 */
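	/*
	 * For example, with a 100 MHz module clock and a requested
	 * 10 MHz: div_cdr1 = 10 and div_cdr2 = 5, which fits in CDR2,
	 * so the bus runs at 100 MHz / (2 * 5) = 10 MHz exactly.
	 */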
	div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
	div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
	if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
		reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
		tfr->effective_speed_hz = mclk_rate / (2 * div_cdr2);
	} else {
		div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
		reg = SUN6I_CLK_CTL_CDR1(div);
		tfr->effective_speed_hz = mclk_rate / (1 << div);
	}

	sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);

	/* Finally enable the bus - doing so before might raise SCK to HIGH */
	reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
	reg |= SUN6I_GBL_CTL_BUS_ENABLE;
	sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);

	/* Setup the transfer now... */
	if (sspi->tx_buf)
		tx_len = tfr->len;

	/* Setup the counters */
	sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, tfr->len);
	sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, tx_len);
	sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, tx_len);

	if (!use_dma) {
		/* Fill the TX FIFO */
		sun6i_spi_fill_fifo(sspi);
	} else {
		ret = sun6i_spi_prepare_dma(sspi, tfr);
		if (ret) {
			dev_warn(&master->dev,
				 "%s: prepare DMA failed, ret=%d",
				 dev_name(&spi->dev), ret);
			return ret;
		}
	}

	/* Enable the interrupts */
	reg = SUN6I_INT_CTL_TC;

	if (!use_dma) {
		if (rx_len > sspi->fifo_depth)
			reg |= SUN6I_INT_CTL_RF_RDY;
		if (tx_len > sspi->fifo_depth)
			reg |= SUN6I_INT_CTL_TF_ERQ;
	}

	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);

	/* Start the transfer */
	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
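
	/*
	 * Give the transfer roughly twice its ideal time on the wire
	 * (len * 8 bits at speed_hz), in milliseconds, with a 100 ms
	 * floor; e.g. 1024 bytes at 1 MHz still uses the 100 ms floor.
	 */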
	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
	start = jiffies;
	timeout = wait_for_completion_timeout(&sspi->done,
					      msecs_to_jiffies(tx_time));
	end = jiffies;
	if (!timeout) {
		dev_warn(&master->dev,
			 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
			 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
			 jiffies_to_msecs(end - start), tx_time);
		ret = -ETIMEDOUT;
	}

	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);

	if (ret && use_dma) {
		dmaengine_terminate_sync(master->dma_rx);
		dmaengine_terminate_sync(master->dma_tx);
	}

	return ret;
}
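
/*
 * Interrupt handler: transfer-complete (TC) drains whatever is left in the
 * RX FIFO and completes the transfer; RF_RDY drains the RX FIFO mid-transfer;
 * TF_ERQ tops up the TX FIFO and disables itself once nothing is left to
 * send. RF_RDY and TF_ERQ are only acked after the FIFO work is done.
 */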
static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
{
	struct sun6i_spi *sspi = dev_id;
	u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);

	/* Transfer complete */
	if (status & SUN6I_INT_CTL_TC) {
		sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
		sun6i_spi_drain_fifo(sspi);
		complete(&sspi->done);
		return IRQ_HANDLED;
	}

	/* Receive FIFO 3/4 full */
	if (status & SUN6I_INT_CTL_RF_RDY) {
		sun6i_spi_drain_fifo(sspi);
		/* Only clear the interrupt _after_ draining the FIFO */
		sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_RF_RDY);
		return IRQ_HANDLED;
	}

	/* Transmit FIFO 3/4 empty */
	if (status & SUN6I_INT_CTL_TF_ERQ) {
		sun6i_spi_fill_fifo(sspi);

		if (!sspi->len)
			/* nothing left to transmit */
			sun6i_spi_disable_interrupt(sspi, SUN6I_INT_CTL_TF_ERQ);

		/* Only clear the interrupt _after_ re-seeding the FIFO */
		sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TF_ERQ);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
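
/*
 * Runtime PM: resume ungates the AHB and module clocks, releases the reset
 * line and restores master mode plus the TP bit in the global control
 * register; suspend does the reverse.
 */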
static int sun6i_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sun6i_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sspi->hclk);
	if (ret) {
		dev_err(dev, "Couldn't enable AHB clock\n");
		goto out;
	}

	ret = clk_prepare_enable(sspi->mclk);
	if (ret) {
		dev_err(dev, "Couldn't enable module clock\n");
		goto err;
	}

	ret = reset_control_deassert(sspi->rstc);
	if (ret) {
		dev_err(dev, "Couldn't deassert the device from reset\n");
		goto err2;
	}

	sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
			SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);

	return 0;

err2:
	clk_disable_unprepare(sspi->mclk);
err:
	clk_disable_unprepare(sspi->hclk);
out:
	return ret;
}

static int sun6i_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sun6i_spi *sspi = spi_master_get_devdata(master);

	reset_control_assert(sspi->rstc);
	clk_disable_unprepare(sspi->mclk);
	clk_disable_unprepare(sspi->hclk);

	return 0;
}

static bool sun6i_spi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct sun6i_spi *sspi = spi_master_get_devdata(master);

	/*
	 * If the number of spi words to transfer is less than or equal to
	 * the fifo length, we can just fill the fifo and wait for a single
	 * irq, so don't bother setting up dma
	 */
	return xfer->len > sspi->fifo_depth;
}
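
/*
 * Probe: map the controller, grab the IRQ, clocks and reset line, then try
 * to get the "tx"/"rx" DMA channels. Missing channels are not fatal - the
 * driver simply falls back to PIO - but -EPROBE_DEFER is propagated so a
 * late dmaengine driver still gets a chance.
 */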
static int sun6i_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sun6i_spi *sspi;
	struct resource *mem;
	int ret = 0, irq;

	master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);

	sspi->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(sspi->base_addr)) {
		ret = PTR_ERR(sspi->base_addr);
		goto err_free_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_free_master;
	}

	ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
			       0, "sun6i-spi", sspi);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_free_master;
	}

	sspi->master = master;
	sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev);

	master->max_speed_hz = 100 * 1000 * 1000;
	master->min_speed_hz = 3 * 1000;
	master->use_gpio_descriptors = true;
	master->set_cs = sun6i_spi_set_cs;
	master->transfer_one = sun6i_spi_transfer_one;
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->max_transfer_size = sun6i_spi_max_transfer_size;

	sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sspi->hclk)) {
		dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
		ret = PTR_ERR(sspi->hclk);
		goto err_free_master;
	}

	sspi->mclk = devm_clk_get(&pdev->dev, "mod");
	if (IS_ERR(sspi->mclk)) {
		dev_err(&pdev->dev, "Unable to acquire module clock\n");
		ret = PTR_ERR(sspi->mclk);
		goto err_free_master;
	}

	init_completion(&sspi->done);

	sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(sspi->rstc)) {
		dev_err(&pdev->dev, "Couldn't get reset controller\n");
		ret = PTR_ERR(sspi->rstc);
		goto err_free_master;
	}

	master->dma_tx = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		/* Check tx to see if we need defer probing driver */
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_free_master;
		}
		dev_warn(&pdev->dev, "Failed to request TX DMA channel\n");
		master->dma_tx = NULL;
	}

	master->dma_rx = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_free_dma_tx;
		}
		dev_warn(&pdev->dev, "Failed to request RX DMA channel\n");
		master->dma_rx = NULL;
	}

	if (master->dma_tx && master->dma_rx) {
		sspi->dma_addr_tx = mem->start + SUN6I_TXDATA_REG;
		sspi->dma_addr_rx = mem->start + SUN6I_RXDATA_REG;
		master->can_dma = sun6i_spi_can_dma;
	}

	/*
	 * This wake-up/shutdown pattern is to be able to have the
	 * device woken up, even if runtime_pm is disabled
	 */
	ret = sun6i_spi_runtime_resume(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't resume the device\n");
		goto err_free_dma_rx;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, SUN6I_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "cannot register SPI master\n");
		goto err_pm_disable;
	}

	return 0;

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	sun6i_spi_runtime_suspend(&pdev->dev);
err_free_dma_rx:
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);
err_free_dma_tx:
	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
err_free_master:
	spi_master_put(master);
	return ret;
}
static int sun6i_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);

	pm_runtime_force_suspend(&pdev->dev);

	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);
	return 0;
}

static const struct of_device_id sun6i_spi_match[] = {
	{ .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH },
	{ .compatible = "allwinner,sun8i-h3-spi",  .data = (void *)SUN8I_FIFO_DEPTH },
	{}
};
MODULE_DEVICE_TABLE(of, sun6i_spi_match);

static const struct dev_pm_ops sun6i_spi_pm_ops = {
	.runtime_resume		= sun6i_spi_runtime_resume,
	.runtime_suspend	= sun6i_spi_runtime_suspend,
};

static struct platform_driver sun6i_spi_driver = {
	.probe	= sun6i_spi_probe,
	.remove	= sun6i_spi_remove,
	.driver	= {
		.name		= "sun6i-spi",
		.of_match_table	= sun6i_spi_match,
		.pm		= &sun6i_spi_pm_ops,
	},
};
module_platform_driver(sun6i_spi_driver);

MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
MODULE_LICENSE("GPL");