/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */
/*
 * The MMCIF driver is now processing MMC requests asynchronously, according
 * to the Linux MMC API requirement.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e., if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */
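/*
 * An illustrative walk-through of a multi-block PIO read with a stop
 * command, in terms of the handlers defined below:
 *
 *   sh_mmcif_request()            top half: sh_mmcif_start_cmd() programs
 *                                 the command and arms the timeout work
 *   IRQ -> sh_mmcif_intr()        "hard" half: ack/mask, wakes the thread
 *       -> sh_mmcif_irqt()        bottom half: sh_mmcif_end_cmd() reads the
 *                                 response, sets up the data stage
 *   IRQ -> sh_mmcif_irqt()        bottom half: sh_mmcif_mread_block(),
 *                                 repeated per block, timeout rearmed
 *   IRQ -> sh_mmcif_irqt()        bottom half: sh_mmcif_stop_cmd() enables
 *                                 the auto-CMD12 completion interrupt
 *   IRQ -> sh_mmcif_irqt()        bottom half: CMD12 response is read and
 *                                 mmc_request_done() completes the request
 */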
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */
enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};
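/*
 * Quick reference for the wait_for states above, derived from the
 * handlers below: REQUEST means idle, no request in flight; CMD waits
 * for the command response; READ/WRITE wait for the single-block PIO
 * buffer-ready interrupt; MREAD/MWRITE wait for the next buffer of a
 * multi-block PIO transfer; READ_END/WRITE_END wait for the transfer-end
 * interrupt; STOP waits for the auto-CMD12 response.
 */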
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;
	struct mutex thread_lock;

	/* DMA support */
	struct dma_chan	*chan_rx;
	struct dma_chan	*chan_tx;
	struct completion dma_complete;
	bool dma_active;
};
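/*
 * Concurrency note, summarising the code below: host->lock guards the
 * host->state transitions made from the request, set_ios and timeout
 * paths, while host->thread_lock serialises request processing inside
 * the threaded IRQ handler, which may be entered from two interrupt
 * lines.
 */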
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	complete(&host->dma_complete);
}
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	host->dma_active = false;

	if (!pdata)
		return;

	if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_tx);
	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto ecfgtx;

	host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_rx);
	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto erqrx;

	cfg.slave_id = pdata->slave_id_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = res->start + MMCIF_CE_DATA;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto ecfgrx;

	return;

ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
erqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(DIV_ROUND_UP(host->clk,
						   clk) - 1) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(&host->pd->dev, "CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(&host->pd->dev, "Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, "End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}
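/*
 * Advance the PIO position after one block has been transferred: either
 * continue within the current scatterlist entry, or step to the next
 * entry once the current one has been fully consumed. Returns true while
 * more blocks remain to be transferred.
 */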
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}
static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_UHS_DDR50:
			/*
			 * MMC core will only set this timing, if the host
			 * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
			 * implementations with this capability, e.g. sh73a0,
			 * will have to set it in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;

	switch (opc) {
	/* response busy check */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO command */
	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
			break;
		/* fall through: the SD variant of this opcode is rejected */
	case MMC_APP_CMD:
	case SD_IO_RW_DIRECT:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}
static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
	int ret = clk_enable(host->hclk);

	if (!ret) {
		host->clk = clk_get_rate(host->hclk);
		host->mmc->f_max = host->clk / 2;
		host->mmc->f_min = host->clk / 512;
	}

	return ret;
}

static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
		sh_mmcif_set_power(host, ios);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put_sync(&host->pd->dev);
			clk_disable(host->hclk);
			host->power = false;
			if (ios->power_mode == MMC_POWER_OFF)
				sh_mmcif_set_power(host, ios);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			sh_mmcif_clk_update(host);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}
static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	int ret = mmc_gpio_get_cd(mmc);

	if (ret >= 0)
		return ret;

	if (!p || !p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(&host->pd->dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from DMA callback and error, so, have to
	 * reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	bool wait = false;

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true, if processing continues, and false, if the
	 * request has to be completed - successfully or not
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT,
			~(state & sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK)));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}
static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}
static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
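/*
 * Board-side sketch of how this driver is typically instantiated via
 * platform data. The values below are illustrative only: the DMA slave
 * IDs and capability flags are hypothetical and must come from the
 * actual SoC/board:
 *
 *	static struct sh_mmcif_plat_data mmcif_pdata = {
 *		.caps		= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
 *		.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
 *		.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,
 *		.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
 *	};
 *
 * The device is registered against the "sh_mmcif" driver name with one
 * MEM resource and up to two IRQ resources; DT platforms match via the
 * "renesas,sh-mmcif" compatible string instead.
 */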
static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto ealloch;
	}

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto eofparse;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(1000);

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	host->hclk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
		goto eclkget;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto eclkupdate;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto eresume;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (%s)\n", name);
		goto ereqirq0;
	}
	if (irq[1] >= 0) {
		ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
					   0, "sh_mmc:int", host);
		if (ret) {
			dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
			goto ereqirq1;
		}
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto erqcd;
	}

	mutex_init(&host->thread_lock);

	clk_disable(host->hclk);
	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto emmcaddh;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

emmcaddh:
erqcd:
	if (irq[1] >= 0)
		free_irq(irq[1], host);
ereqirq1:
	free_irq(irq[0], host);
ereqirq0:
	pm_runtime_suspend(&pdev->dev);
eresume:
	clk_disable(host->hclk);
eclkupdate:
	clk_put(host->hclk);
eclkget:
	pm_runtime_disable(&pdev->dev);
eofparse:
	mmc_free_host(mmc);
ealloch:
	iounmap(reg);
	return ret;
}
static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int irq[2];

	host->dying = true;
	clk_enable(host->hclk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	if (irq[1] >= 0)
		free_irq(irq[1], host);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);
	int ret = mmc_suspend_host(host->mmc);

	if (!ret)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif	/* CONFIG_PM */

static const struct of_device_id mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, mmcif_of_match);

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	.suspend = sh_mmcif_suspend,
	.resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.owner	= THIS_MODULE,
		.of_match_table = mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");