/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>

#define MAX_SPI_PORTS		6
#define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO	(1 << 1)

/* Registers and bit-fields */
#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)
#define S3C64XX_SPI_SLAVE_NSC_CNT_2		(2<<4)

#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK			(3<<0)

#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;
};

/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of known quirks (S3C64XX_SPI_QUIRK_*).
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Pointer to the platform device this driver is bound to.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 * @rx_dma: Local runtime info for the controller's Rx DMA channel.
 * @tx_dma: Local runtime info for the controller's Tx DMA channel.
 * @port_conf: Local reference to the hardware info for this port.
 * @port_id: Port index, used as the SPI bus number.
 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct s3c64xx_spi_info         *cntrlr_info;
	struct spi_device               *tgl_spi;
	spinlock_t                      lock;
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
};
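
/*
 * Reset the SPI channel, drain whatever is left in the Tx/Rx FIFOs and
 * clear the DMA enable bits so the next transfer starts from a clean state.
 */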
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sdd) && loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sdd))
			readl(regs + S3C64XX_SPI_RX_DATA);
		else
			break;
	} while (loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);
}
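
/*
 * DMA completion callback: clear the BUSY flag for the direction that just
 * finished and complete the transfer once both Rx and Tx are done.
 */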
static void s3c64xx_spi_dmacb(void *data)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_dma_data *dma = data;
	unsigned long flags;

	if (dma->direction == DMA_DEV_TO_MEM)
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, rx_dma);
	else
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, tx_dma);

	spin_lock_irqsave(&sdd->lock, flags);

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd->state &= ~RXBUSY;
		if (!(sdd->state & TXBUSY))
			complete(&sdd->xfer_completion);
	} else {
		sdd->state &= ~TXBUSY;
		if (!(sdd->state & RXBUSY))
			complete(&sdd->xfer_completion);
	}

	spin_unlock_irqrestore(&sdd->lock, flags);
}
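
/*
 * Point the slave DMA channel at the Tx/Rx data register for the current
 * word width, then prepare the scatterlist descriptor and issue it.
 */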
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
			struct sg_table *sgt)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc;

	memset(&config, 0, sizeof(config));

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = dma->direction;
		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.src_addr_width = sdd->cur_bpw / 8;
		config.src_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = dma->direction;
		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.dst_addr_width = sdd->cur_bpw / 8;
		config.dst_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	}

	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
				       dma->direction, DMA_PREP_INTERRUPT);

	desc->callback = s3c64xx_spi_dmacb;
	desc->callback_param = dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(dma->ch);
}
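
/*
 * Acquire the Rx/Tx DMA channels for the controller; polling-only
 * configurations skip DMA entirely.
 */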
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
	dma_filter_fn filter = sdd->cntrlr_info->filter;
	struct device *dev = &sdd->pdev->dev;
	dma_cap_mask_t mask;
	int ret;

	if (!is_polling(sdd)) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)(long)sdd->rx_dma.dmach, dev, "rx");
		if (!sdd->rx_dma.ch) {
			dev_err(dev, "Failed to get RX DMA channel\n");
			ret = -EBUSY;
			goto out;
		}
		spi->dma_rx = sdd->rx_dma.ch;

		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)(long)sdd->tx_dma.dmach, dev, "tx");
		if (!sdd->tx_dma.ch) {
			dev_err(dev, "Failed to get TX DMA channel\n");
			ret = -EBUSY;
			goto out_rx;
		}
		spi->dma_tx = sdd->tx_dma.ch;
	}

	return 0;

out_rx:
	dma_release_channel(sdd->rx_dma.ch);
out:
	return ret;
}

static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	return 0;
}
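
/* DMA is only worth using for transfers that do not fit into the FIFO */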
static bool s3c64xx_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
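
/*
 * Program MODE_CFG/CH_CFG for this transfer and either hand the buffers to
 * the DMA engine or feed the Tx FIFO directly in PIO mode.
 */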
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
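
/*
 * Busy-wait until the Rx FIFO is full (or the timeout expires) and report
 * how much data is actually available to read.
 */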
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}
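
/*
 * Wait for the DMA callbacks to signal completion; for Tx-only transfers
 * also wait until the data has really left the shift register.
 */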
static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within timeout, then
	 * proceed further else return -EIO.
	 * DmaTx returns after simply writing data in the FIFO,
	 * w/o waiting for real transmission on the bus to finish.
	 * DmaRx returns only after Dma read data from FIFO which
	 * needs bus transmission to finish, so we don't worry if
	 * Xfer involved Rx (with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}
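
/*
 * PIO path: poll the status register until the expected amount of data has
 * arrived, then drain the Rx FIFO in FIFO-sized chunks.
 */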
static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed at least once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}
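
/*
 * Apply the cached mode/bpw/speed to the hardware. On CMU-clocked parts the
 * bus rate is set via clk_set_rate(); otherwise the prescaler is programmed
 * so that the SPI clock is src_clk / 2 / (PSR + 1).
 */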
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}

#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

static int s3c64xx_spi_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	/* If Master's(controller) state differs from that needed by Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	return 0;
}
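
/*
 * Core transfer hook: reconfigure the controller if bpw/speed changed, pick
 * DMA or PIO based on the transfer length, start the chip-select signalling
 * and wait for the transfer to finish.
 */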
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;
	int use_dma;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz ? : spi->max_speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		s3c64xx_spi_config(sdd);
	}

	/* Polling method for xfers not bigger than FIFO capacity */
	use_dma = 0;
	if (!is_polling(sdd) &&
	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
		use_dma = 1;

	spin_lock_irqsave(&sdd->lock, flags);

	/* Pending only which is to be done */
	sdd->state &= ~RXBUSY;
	sdd->state &= ~TXBUSY;

	enable_datapath(sdd, spi, xfer, use_dma);

	/* Start the signals */
	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	else
		writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
			| S3C64XX_SPI_SLAVE_AUTO | S3C64XX_SPI_SLAVE_NSC_CNT_2,
			sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	spin_unlock_irqrestore(&sdd->lock, flags);

	if (use_dma)
		status = wait_for_dma(sdd, xfer);
	else
		status = wait_for_pio(sdd, xfer);

	if (status) {
		dev_err(&spi->dev,
			"I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
			(sdd->state & RXBUSY) ? 'f' : 'p',
			(sdd->state & TXBUSY) ? 'f' : 'p',
			xfer->len);

		if (use_dma) {
			if (xfer->tx_buf != NULL
			    && (sdd->state & TXBUSY))
				dmaengine_terminate_all(sdd->tx_dma.ch);
			if (xfer->rx_buf != NULL
			    && (sdd->state & RXBUSY))
				dmaengine_terminate_all(sdd->rx_dma.ch);
		}
	} else {
		flush_fifo(sdd);
	}

	return status;
}
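
/*
 * Build chip-select info from the slave's "controller-data" DT child node;
 * currently only the feedback delay is taken from it.
 */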
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
	struct device_node *slave_np, *data_np = NULL;
	u32 fb_delay = 0;

	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	data_np = of_get_child_by_name(slave_np, "controller-data");
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
		of_node_put(data_np);
		return ERR_PTR(-ENOMEM);
	}

	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
	of_node_put(data_np);
	return cs;
}

/*
 * Here we only check the validity of requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	} else if (cs) {
		/* On non-DT platforms the SPI core will set spi->cs_gpio
		 * to -ENOENT. The GPIO pin used to drive the chip select
		 * is defined by using platform data, so the spi->cs_gpio
		 * value has to be overridden with the proper GPIO pin number.
		 */
		spi->cs_gpio = cs->line;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	if (!spi_get_ctldata(spi)) {
		if (gpio_is_valid(spi->cs_gpio)) {
			err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					spi->cs_gpio, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	return 0;

setup_exit:
	pm_runtime_put(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	if (gpio_is_valid(spi->cs_gpio))
		gpio_free(spi->cs_gpio);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_free(spi->cs_gpio);
		if (spi->dev.of_node)
			kfree(cs);
		else {
			/* On non-DT platforms, the SPI core sets
			 * spi->cs_gpio to -ENOENT and .setup()
			 * overrides it with the GPIO pin value
			 * passed using platform data.
			 */
			spi->cs_gpio = -ENOENT;
		}
	}

	spi_set_ctldata(spi, NULL);
}
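
/*
 * Error interrupt handler: report FIFO overrun/underrun conditions and clear
 * the pending bits, which requires a set-then-clear sequence.
 */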
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}
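
/*
 * Bring the port into a known state: de-assert the chip select (on parts
 * without automatic CS), mask all interrupts, reset the mode and packet
 * count registers and flush the FIFOs.
 */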
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

#ifdef CONFIG_OF
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	return sci;
}
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif

static const struct of_device_id s3c64xx_spi_dt_match[];

static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
						struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}

static int s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res;
	struct resource	*res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	int ret, irq;
	char clk_name[16];

	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
	}

	if (!sci) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	if (pdev->dev.of_node) {
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
			goto err0;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}

	sdd->cur_bpw = 8;

	if (!sdd->pdev->dev.of_node) {
		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (!res) {
			dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->tx_dma.dmach = res->start;

		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (!res) {
			dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->rx_dma.dmach = res->start;
	}

	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;
	if (!is_polling(sdd))
		master->can_dma = s3c64xx_spi_can_dma;

	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err0;
	}

	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err0;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err0;
	}

	if (clk_prepare_enable(sdd->clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		ret = -EBUSY;
		goto err0;
	}

	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err2;
	}

	if (clk_prepare_enable(sdd->src_clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		ret = -EBUSY;
		goto err2;
	}

	/* Setup Default Mode */
	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err3;
	}

	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err3;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
					mem_res,
					sdd->rx_dma.dmach, sdd->tx_dma.dmach);

	return 0;

err3:
	clk_disable_unprepare(sdd->src_clk);
err2:
	clk_disable_unprepare(sdd->clk);
err0:
	spi_master_put(master);

	return ret;
}

static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	int ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(sdd->clk);
		clk_disable_unprepare(sdd->src_clk);
	}

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	if (sci->cfg_gpio)
		sci->cfg_gpio();

	if (!pm_runtime_suspended(dev)) {
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);

	return 0;
}

static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		return ret;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0) {
		clk_disable_unprepare(sdd->src_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
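
/*
 * Per-SoC controller descriptions: FIFO level mask per port, status bit
 * layout and the quirks checked at runtime.
 */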
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_POLL,
};

static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	},
	{ },
};

static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5440-spi",
			.data = (void *)&exynos5440_spi_port_config,
	},
	{ .compatible = "samsung,exynos7-spi",
			.data = (void *)&exynos7_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);

static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");