2011-01-02 09:11:59 +03:00
/*
* Synopsys DesignWare Multimedia Card Interface driver
* ( Based on NXP driver for lpc 31 xx )
*
* Copyright ( C ) 2009 NXP Semiconductors
* Copyright ( C ) 2009 , 2010 Imagination Technologies Ltd .
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*/
# include <linux/blkdev.h>
# include <linux/clk.h>
# include <linux/debugfs.h>
# include <linux/device.h>
# include <linux/dma-mapping.h>
# include <linux/err.h>
# include <linux/init.h>
# include <linux/interrupt.h>
# include <linux/ioport.h>
# include <linux/module.h>
# include <linux/platform_device.h>
# include <linux/seq_file.h>
# include <linux/slab.h>
# include <linux/stat.h>
# include <linux/delay.h>
# include <linux/irq.h>
2014-12-03 02:42:46 +03:00
# include <linux/mmc/card.h>
2011-01-02 09:11:59 +03:00
# include <linux/mmc/host.h>
# include <linux/mmc/mmc.h>
2014-08-22 17:47:51 +04:00
# include <linux/mmc/sd.h>
2013-08-30 19:14:05 +04:00
# include <linux/mmc/sdio.h>
2011-01-02 09:11:59 +03:00
# include <linux/mmc/dw_mmc.h>
# include <linux/bitops.h>
2011-02-25 05:08:14 +03:00
# include <linux/regulator/consumer.h>
2012-09-17 22:16:40 +04:00
# include <linux/of.h>
2013-01-11 21:03:53 +04:00
# include <linux/of_gpio.h>
2014-01-09 18:35:10 +04:00
# include <linux/mmc/slot-gpio.h>
2011-01-02 09:11:59 +03:00
# include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)

/* Values kept in host->dir_status for the current data transfer direction */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2

/* Transfers smaller than this many bytes are not worth the DMA setup cost */
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt-status sources, used to ack/clear pending bits */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)
2014-10-20 11:12:33 +04:00
/* IDMAC hardware descriptor layout when 64-bit addressing is in use */
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};
2011-01-02 09:11:59 +03:00
/* IDMAC hardware descriptor layout for 32-bit addressing (little-endian) */
struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* second address field chains to next desc */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor is owned by the DMA engine */

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
2015-06-25 11:25:07 +03:00
/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

#endif /* CONFIG_MMC_DW_IDMAC */

/* Forward declarations for helpers defined later in this file */
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
2013-08-30 19:14:23 +04:00
2011-01-02 09:11:59 +03:00
# if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show ( struct seq_file * s , void * v )
{
struct dw_mci_slot * slot = s - > private ;
struct mmc_request * mrq ;
struct mmc_command * cmd ;
struct mmc_command * stop ;
struct mmc_data * data ;
/* Make sure we get a consistent snapshot */
spin_lock_bh ( & slot - > host - > lock ) ;
mrq = slot - > mrq ;
if ( mrq ) {
cmd = mrq - > cmd ;
data = mrq - > data ;
stop = mrq - > stop ;
if ( cmd )
seq_printf ( s ,
" CMD%u(0x%x) flg %x rsp %x %x %x %x err %d \n " ,
cmd - > opcode , cmd - > arg , cmd - > flags ,
cmd - > resp [ 0 ] , cmd - > resp [ 1 ] , cmd - > resp [ 2 ] ,
cmd - > resp [ 2 ] , cmd - > error ) ;
if ( data )
seq_printf ( s , " DATA %u / %u * %u flg %x err %d \n " ,
data - > bytes_xfered , data - > blocks ,
data - > blksz , data - > flags , data - > error ) ;
if ( stop )
seq_printf ( s ,
" CMD%u(0x%x) flg %x rsp %x %x %x %x err %d \n " ,
stop - > opcode , stop - > arg , stop - > flags ,
stop - > resp [ 0 ] , stop - > resp [ 1 ] , stop - > resp [ 2 ] ,
stop - > resp [ 2 ] , stop - > error ) ;
}
spin_unlock_bh ( & slot - > host - > lock ) ;
return 0 ;
}
/* debugfs open: bind dw_mci_req_show() to the slot stored in i_private */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
/* debugfs "req" file: standard seq_file plumbing around dw_mci_req_show() */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dw_mci_regs_show ( struct seq_file * s , void * v )
{
seq_printf ( s , " STATUS: \t 0x%08x \n " , SDMMC_STATUS ) ;
seq_printf ( s , " RINTSTS: \t 0x%08x \n " , SDMMC_RINTSTS ) ;
seq_printf ( s , " CMD: \t 0x%08x \n " , SDMMC_CMD ) ;
seq_printf ( s , " CTRL: \t 0x%08x \n " , SDMMC_CTRL ) ;
seq_printf ( s , " INTMASK: \t 0x%08x \n " , SDMMC_INTMASK ) ;
seq_printf ( s , " CLKENA: \t 0x%08x \n " , SDMMC_CLKENA ) ;
return 0 ;
}
/* debugfs open: bind dw_mci_regs_show() to the host stored in i_private */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
/* debugfs "regs" file: standard seq_file plumbing around dw_mci_regs_show() */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create the per-slot debugfs entries under the mmc core's debugfs root.
 * Failure is logged but non-fatal: debugfs is diagnostics only.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
# endif /* defined(CONFIG_DEBUG_FS) */
2014-08-22 17:47:51 +04:00
static void mci_send_cmd ( struct dw_mci_slot * slot , u32 cmd , u32 arg ) ;
2011-01-02 09:11:59 +03:00
/*
 * Build the SDMMC_CMD register value for @cmd: opcode plus the stop/abort,
 * wait-for-data, response and data-direction flag bits.  CMD11 (voltage
 * switch) additionally tweaks host state and the clock-enable register as
 * a side effect.  The variant hook may adjust the final value.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	/* Abort-class commands must carry the STOP bit */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Give the controller variant a chance to adjust the value */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
2013-08-30 19:14:05 +04:00
static u32 dw_mci_prep_stop_abort ( struct dw_mci * host , struct mmc_command * cmd )
{
struct mmc_command * stop ;
u32 cmdr ;
if ( ! cmd - > data )
return 0 ;
stop = & host - > stop_abort ;
cmdr = cmd - > opcode ;
memset ( stop , 0 , sizeof ( struct mmc_command ) ) ;
if ( cmdr = = MMC_READ_SINGLE_BLOCK | |
cmdr = = MMC_READ_MULTIPLE_BLOCK | |
cmdr = = MMC_WRITE_BLOCK | |
2014-12-01 18:13:39 +03:00
cmdr = = MMC_WRITE_MULTIPLE_BLOCK | |
cmdr = = MMC_SEND_TUNING_BLOCK | |
cmdr = = MMC_SEND_TUNING_BLOCK_HS200 ) {
2013-08-30 19:14:05 +04:00
stop - > opcode = MMC_STOP_TRANSMISSION ;
stop - > arg = 0 ;
stop - > flags = MMC_RSP_R1B | MMC_CMD_AC ;
} else if ( cmdr = = SD_IO_RW_EXTENDED ) {
stop - > opcode = SD_IO_RW_DIRECT ;
stop - > arg | = ( 1 < < 31 ) | ( 0 < < 28 ) | ( SDIO_CCCR_ABORT < < 9 ) |
( ( cmd - > arg > > 28 ) & 0x7 ) ;
stop - > flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC ;
} else {
return 0 ;
}
cmdr = stop - > opcode | SDMMC_CMD_STOP |
SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP ;
return cmdr ;
}
2015-02-20 23:31:56 +03:00
/*
 * Databook says that before issuing a new data transfer command we need
 * to check whether the card is busy.  Data transfer commands all have
 * SDMMC_CMD_PRV_DAT_WAIT set, so we key off that — except for the
 * voltage-switch sequence (SDMMC_CMD_VOLT_SWITCH), where busy is expected
 * and we must not wait.  Gives up after 500ms and lets the command fail.
 */
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);

	if (!(cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) ||
	    (cmd_flags & SDMMC_CMD_VOLT_SWITCH))
		return;

	while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
		if (time_after(jiffies, deadline)) {
			/* Command will fail; we'll pass error then */
			dev_err(host->dev, "Busy; trying anyway\n");
			break;
		}
		udelay(10);
	}
}
2011-01-02 09:11:59 +03:00
/*
 * Latch @cmd as the in-flight command and fire it in hardware: write the
 * argument, drain the write buffer, wait out any card-busy condition,
 * then write CMD with the START bit.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
2013-08-30 19:14:05 +04:00
/*
 * Issue the stop/abort command for @data: the request's own stop command
 * when it has one, otherwise the pre-built host->stop_abort command.
 */
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
/* Stop and clean up the DMA engine (if in use) and flag transfer complete */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
2012-02-06 11:55:07 +04:00
static int dw_mci_get_dma_dir ( struct mmc_data * data )
{
if ( data - > flags & MMC_DATA_WRITE )
return DMA_TO_DEVICE ;
else
return DMA_FROM_DEVICE ;
}
2012-02-16 06:19:38 +04:00
# ifdef CONFIG_MMC_DW_IDMAC
2011-01-02 09:11:59 +03:00
static void dw_mci_dma_cleanup ( struct dw_mci * host )
{
struct mmc_data * data = host - > data ;
if ( data )
2012-02-06 11:55:07 +04:00
if ( ! data - > host_cookie )
2012-09-17 22:16:35 +04:00
dma_unmap_sg ( host - > dev ,
2012-02-06 11:55:07 +04:00
data - > sg ,
data - > sg_len ,
dw_mci_get_dma_dir ( data ) ) ;
2011-01-02 09:11:59 +03:00
}
2013-08-30 19:14:33 +04:00
/* Trigger a software reset of the internal DMA controller via BMOD. */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	mci_writel(host, BMOD,
		   mci_readl(host, BMOD) | SDMMC_IDMAC_SWRESET);
}
2011-01-02 09:11:59 +03:00
/* Detach the IDMAC from the controller and halt/reset the DMA engine. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
/*
 * .complete callback of dw_mci_idmac_ops: clean up the finished DMA
 * transfer and, if the request is still alive, mark it complete and
 * kick the state-machine tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist of
 * @data.  Segments longer than DW_MCI_DESC_DATA_LENGTH are split across
 * multiple chained descriptors.  Handles both the 64-bit and the 32-bit
 * (little-endian) descriptor layouts.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	unsigned int desc_len;
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u64 mem_addr = sg_dma_address(&data->sg[i]);

			/* Split the segment into 4KB-max descriptors */
			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
					     IDMAC_DES0_CH;

				/* Buffer length */
				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des4 = mem_addr & 0xffffffff;
				desc->des5 = mem_addr >> 32;

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc_last->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u32 mem_addr = sg_dma_address(&data->sg[i]);

			/* Split the segment into 4KB-max descriptors */
			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
							 IDMAC_DES0_DIC |
							 IDMAC_DES0_CH);

				/* Buffer length */
				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des2 = cpu_to_le32(mem_addr);

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

		/* Set last descriptor */
		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
						 IDMAC_DES0_DIC));
		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
	}

	wmb(); /* drain writebuffer */
}
/* Build the descriptor ring for the current data and start the IDMAC. */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
/*
 * One-time IDMAC setup: forward-link the descriptor ring living in
 * host->sg_cpu (closing it with an end-of-ring descriptor), reset the
 * DMAC, unmask its completion interrupts and program the descriptor base
 * address.  Handles both 64-bit and 32-bit descriptor layouts.
 * Always returns 0 (kept to satisfy the dw_mci_dma_ops .init signature).
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;

			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
2012-11-07 01:55:31 +04:00
/* DMA ops vtable for the Synopsys internal DMA controller (IDMAC) */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
# endif /* CONFIG_MMC_DW_IDMAC */
2012-02-06 11:55:07 +04:00
/*
 * Map @data's scatterlist for DMA if the transfer is DMA-eligible.
 *
 * Returns the number of mapped segments, or -EINVAL when the transfer
 * must fall back to PIO (too short, or buffers/lengths not word-aligned).
 * When @next is true this is the ahead-of-time path from pre_req and the
 * segment count is cached in data->host_cookie; when false, a cached
 * cookie from an earlier pre_req is reused.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Reuse a mapping already set up by pre_req */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
/*
 * mmc_host_ops.pre_req: map the next request's scatterlist ahead of time
 * so the DMA setup cost overlaps with the current transfer.  On failure
 * the cookie is cleared so the request falls back to the normal path.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* Guard against a request that already carries a mapping cookie */
	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
/* mmc_host_ops.post_req: undo the mapping made in dw_mci_pre_req(). */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
2013-08-30 19:13:42 +04:00
static void dw_mci_adjust_fifoth ( struct dw_mci * host , struct mmc_data * data )
{
# ifdef CONFIG_MMC_DW_IDMAC
unsigned int blksz = data - > blksz ;
const u32 mszs [ ] = { 1 , 4 , 8 , 16 , 32 , 64 , 128 , 256 } ;
u32 fifo_width = 1 < < host - > data_shift ;
u32 blksz_depth = blksz / fifo_width , fifoth_val ;
u32 msize = 0 , rx_wmark = 1 , tx_wmark , tx_wmark_invers ;
2015-08-03 10:07:21 +03:00
int idx = ARRAY_SIZE ( mszs ) - 1 ;
2013-08-30 19:13:42 +04:00
tx_wmark = ( host - > fifo_depth ) / 2 ;
tx_wmark_invers = host - > fifo_depth - tx_wmark ;
/*
* MSIZE is ' 1 ' ,
* if blksz is not a multiple of the FIFO width
*/
if ( blksz % fifo_width ) {
msize = 0 ;
rx_wmark = 1 ;
goto done ;
}
do {
if ( ! ( ( blksz_depth % mszs [ idx ] ) | |
( tx_wmark_invers % mszs [ idx ] ) ) ) {
msize = idx ;
rx_wmark = mszs [ idx ] - 1 ;
break ;
}
} while ( - - idx > 0 ) ;
/*
* If idx is ' 0 ' , it won ' t be tried
* Thus , initial values are uesed
*/
done :
fifoth_val = SDMMC_SET_FIFOTH ( msize , rx_wmark , tx_wmark ) ;
mci_writel ( host , FIFOTH , fifoth_val ) ;
# endif
}
2013-08-30 19:13:55 +04:00
/*
 * Configure the card read threshold (CDTHRCTL) for @data: enabled only
 * for HS200/HS400/SDR104 timings when a full block fits in the FIFO;
 * disabled otherwise.  Read transfers only.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_MMC_HS400 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
2012-02-06 11:55:07 +04:00
/*
 * Try to set @data up for a DMA transfer.  Returns 0 on success, or a
 * negative error when the caller must fall back to PIO.  Sets
 * host->using_dma accordingly.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	host->dma_ops->start(host, sg_len);

	return 0;
}
/*
 * Submit @data for transfer: try DMA first; on failure fall back to PIO
 * using an sg_miter over the scatterlist, with RX/TX FIFO interrupts
 * re-enabled and the DMA interface turned off.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback: iterate the scatterlist with sg_miter */
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
/*
 * Synchronously issue a control command (e.g. a clock update) and
 * busy-wait up to 500ms for the controller to clear the START bit.
 * A timeout is logged but not propagated.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}

	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
2012-11-19 08:56:21 +04:00
/*
 * Program the card clock divider/enable and the slot bus width.
 *
 * The CIU requires a specific sequence for clock changes: disable the
 * clock, notify the CIU via an update-clock command, program CLKDIV,
 * notify again, enable the clock, and notify one final time.  Do not
 * reorder the register writes below.
 *
 * @force_clkinit: reprogram the clock even if the requested speed matches
 *                 the current one (used e.g. after power-up).
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		/* Clock gated off entirely; just tell the CIU. */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; div==0 means bypass (bus_hz). */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes. */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock with reflecting clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
2011-12-22 13:01:29 +04:00
/*
 * Issue @cmd (and its data phase, if any) for @slot.
 *
 * Resets all per-request bookkeeping on @host, programs the block
 * count/size registers for a data transfer, queues the data via
 * dw_mci_submit_data(), then starts the command.  Also precomputes the
 * stop/abort command so the interrupt path can issue it without
 * allocating anything.
 *
 * Caller must hold host->lock (this is only reached from the request
 * queueing path and the tasklet, both of which hold it).
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Fresh request: clear all event/status state from the last one. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* Max hardware timeout; software handles real timeouts. */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	/* Precompute the stop/abort command for the error/stop paths. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
2011-12-22 13:01:29 +04:00
static void dw_mci_start_request ( struct dw_mci * host ,
struct dw_mci_slot * slot )
{
struct mmc_request * mrq = slot - > mrq ;
struct mmc_command * cmd ;
cmd = mrq - > sbc ? mrq - > sbc : mrq - > cmd ;
__dw_mci_start_request ( host , slot , cmd ) ;
}
2011-06-24 16:55:10 +04:00
/* must be called with host->lock held */
2011-01-02 09:11:59 +03:00
static void dw_mci_queue_request ( struct dw_mci * host , struct dw_mci_slot * slot ,
struct mmc_request * mrq )
{
dev_vdbg ( & slot - > mmc - > class_dev , " queue request: state=%d \n " ,
host - > state ) ;
slot - > mrq = mrq ;
2014-08-22 17:47:51 +04:00
if ( host - > state = = STATE_WAITING_CMD11_DONE ) {
dev_warn ( & slot - > mmc - > class_dev ,
" Voltage change didn't complete \n " ) ;
/*
* this case isn ' t expected to happen , so we can
* either crash here or just try to continue on
* in the closest possible state
*/
host - > state = STATE_IDLE ;
}
2011-01-02 09:11:59 +03:00
if ( host - > state = = STATE_IDLE ) {
host - > state = STATE_SENDING_CMD ;
dw_mci_start_request ( host , slot ) ;
} else {
list_add_tail ( & slot - > queue_node , & host - > queue ) ;
}
}
static void dw_mci_request ( struct mmc_host * mmc , struct mmc_request * mrq )
{
struct dw_mci_slot * slot = mmc_priv ( mmc ) ;
struct dw_mci * host = slot - > host ;
WARN_ON ( slot - > mrq ) ;
2011-06-24 16:55:10 +04:00
/*
* The check for card presence and queueing of the request must be
* atomic , otherwise the card could be removed in between and the
* request wouldn ' t fail until another card was inserted .
*/
spin_lock_bh ( & host - > lock ) ;
2011-01-02 09:11:59 +03:00
if ( ! test_bit ( DW_MMC_CARD_PRESENT , & slot - > flags ) ) {
2011-06-24 16:55:10 +04:00
spin_unlock_bh ( & host - > lock ) ;
2011-01-02 09:11:59 +03:00
mrq - > cmd - > error = - ENOMEDIUM ;
mmc_request_done ( mmc , mrq ) ;
return ;
}
dw_mci_queue_request ( host , slot , mrq ) ;
2011-06-24 16:55:10 +04:00
spin_unlock_bh ( & host - > lock ) ;
2011-01-02 09:11:59 +03:00
}
/*
 * mmc_host_ops.set_ios: apply the core's requested bus settings — bus
 * width, DDR/HS400 timing, clock rate and power state — to the slot.
 *
 * Power sequencing: MMC_POWER_UP enables vmmc and asserts PWREN;
 * MMC_POWER_ON enables vqmmc (once), resets the controller and programs
 * the bus clock; MMC_POWER_OFF gates the clock first, then drops the
 * regulators and PWREN.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bit lives in the upper half of UHS_REG */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the platform variant a chance to tweak settings. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		/* Next command must carry the 80-cycle init sequence. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;
			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A non-zero clock while waiting for CMD11 means the switch is over. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}
2014-08-22 17:47:51 +04:00
static int dw_mci_card_busy ( struct mmc_host * mmc )
{
struct dw_mci_slot * slot = mmc_priv ( mmc ) ;
u32 status ;
/*
* Check the busy bit which is low when DAT [ 3 : 0 ]
* ( the data lines ) are 0000
*/
status = mci_readl ( slot - > host , STATUS ) ;
return ! ! ( status & SDMMC_STATUS_BUSY ) ;
}
/*
 * mmc_host_ops.start_signal_voltage_switch: move the signalling voltage
 * between 3.3V and 1.8V ranges.
 *
 * Returns 0 on success or a negative errno from the regulator core.
 */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/* A platform variant may take over the whole switch. */
	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d: %d - %d\n",
				ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}
2011-01-02 09:11:59 +03:00
static int dw_mci_get_ro ( struct mmc_host * mmc )
{
int read_only ;
struct dw_mci_slot * slot = mmc_priv ( mmc ) ;
2014-03-03 06:36:46 +04:00
int gpio_ro = mmc_gpio_get_ro ( mmc ) ;
2011-01-02 09:11:59 +03:00
/* Use platform get_ro function, else try on board write protect */
2015-05-06 21:31:22 +03:00
if ( ! IS_ERR_VALUE ( gpio_ro ) )
2014-03-03 06:36:46 +04:00
read_only = gpio_ro ;
2011-01-02 09:11:59 +03:00
else
read_only =
mci_readl ( slot - > host , WRTPRT ) & ( 1 < < slot - > id ) ? 1 : 0 ;
dev_dbg ( & mmc - > class_dev , " card is %s \n " ,
read_only ? " read-only " : " read-write " ) ;
return read_only ;
}
/*
 * mmc_host_ops.get_cd: report card presence.
 *
 * Precedence: a broken-CD quirk or a non-removable cap forces "present";
 * then a card-detect GPIO (slot-gpio helper) if one is wired up; finally
 * the controller's CDETECT register (active low).  The per-slot
 * DW_MMC_CARD_PRESENT flag is kept in sync under host->lock so the
 * request path sees a consistent value.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (mmc->caps & MMC_CAP_NONREMOVABLE))
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
2014-12-03 02:42:46 +03:00
/*
 * mmc_host_ops.init_card: per-card setup once the core knows the card
 * type.  Here we only decide whether the card clock may be gated in low
 * power mode (it must not be for SDIO, or the card can't raise IRQs).
 */
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		/* Only touch CLKENA (and notify the CIU) when it changes. */
		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}
2011-08-29 11:41:46 +04:00
static void dw_mci_enable_sdio_irq ( struct mmc_host * mmc , int enb )
{
struct dw_mci_slot * slot = mmc_priv ( mmc ) ;
struct dw_mci * host = slot - > host ;
2014-12-03 02:42:47 +03:00
unsigned long irqflags ;
2011-08-29 11:41:46 +04:00
u32 int_mask ;
2014-12-03 02:42:47 +03:00
spin_lock_irqsave ( & host - > irq_lock , irqflags ) ;
2011-08-29 11:41:46 +04:00
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl ( host , INTMASK ) ;
2014-12-03 02:42:46 +03:00
if ( enb )
int_mask | = SDMMC_INT_SDIO ( slot - > sdio_id ) ;
else
int_mask & = ~ SDMMC_INT_SDIO ( slot - > sdio_id ) ;
mci_writel ( host , INTMASK , int_mask ) ;
2014-12-03 02:42:47 +03:00
spin_unlock_irqrestore ( & host - > irq_lock , irqflags ) ;
2011-08-29 11:41:46 +04:00
}
2013-08-30 19:12:42 +04:00
/*
 * mmc_host_ops.execute_tuning: delegate to the platform variant's tuning
 * hook.  Tuning is entirely variant-specific; without a hook we cannot
 * tune and report -EINVAL, exactly as before.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	if (!drv_data || !drv_data->execute_tuning)
		return -EINVAL;

	return drv_data->execute_tuning(slot);
}
2015-08-03 10:07:21 +03:00
static int dw_mci_prepare_hs400_tuning ( struct mmc_host * mmc ,
struct mmc_ios * ios )
2015-01-29 05:41:57 +03:00
{
struct dw_mci_slot * slot = mmc_priv ( mmc ) ;
struct dw_mci * host = slot - > host ;
const struct dw_mci_drv_data * drv_data = host - > drv_data ;
if ( drv_data & & drv_data - > prepare_hs400_tuning )
return drv_data - > prepare_hs400_tuning ( host , ios ) ;
return 0 ;
}
2011-01-02 09:11:59 +03:00
/* Host controller operations handed to the MMC core for each slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};
/*
 * Finish the current request and start the next queued one, if any.
 *
 * Called with host->lock held; temporarily drops it around
 * mmc_request_done() (which may re-enter the driver) — hence the sparse
 * __releases/__acquires annotations.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot was waiting: start its request immediately. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		/* A finished CMD11 keeps us waiting for the voltage switch. */
		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
2013-08-30 19:14:17 +04:00
/*
 * Read back the card's response and translate the saved interrupt status
 * into cmd->error.  Returns cmd->error (0 on success).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* RESP0 holds the least significant word. */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Error precedence: response timeout, then CRC, then generic. */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
/*
 * Translate the saved data-phase interrupt status into data->error and
 * set bytes_xfered.  On any data error the controller is reset, since
 * stale data may be left in the FIFO.  Returns data->error (0 on
 * success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
/*
 * Bottom-half state machine driving a request through its phases:
 *
 *   SENDING_CMD(11) -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP
 *
 * Events set by the interrupt handler (pending_events) are consumed here.
 * The loop re-evaluates until the state stops changing, so several
 * already-pending events can be processed in one tasklet run.  The
 * switch deliberately falls through between adjacent phases.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now launch the real command. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			/* Command failed with a data phase pending: abort. */
			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* CMD23 requests need no stop cmd. */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the transfer to drain before finishing. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}
2011-06-24 16:57:56 +04:00
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes ( struct dw_mci * host , void * buf , int cnt )
2011-01-02 09:11:59 +03:00
{
2011-06-24 16:57:56 +04:00
memcpy ( ( void * ) & host - > part_buf , buf , cnt ) ;
host - > part_buf_count = cnt ;
}
2011-01-02 09:11:59 +03:00
2011-06-24 16:57:56 +04:00
/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes ( struct dw_mci * host , void * buf , int cnt )
{
cnt = min ( cnt , ( 1 < < host - > data_shift ) - host - > part_buf_count ) ;
memcpy ( ( void * ) & host - > part_buf + host - > part_buf_count , buf , cnt ) ;
host - > part_buf_count + = cnt ;
return cnt ;
}
2011-01-02 09:11:59 +03:00
2011-06-24 16:57:56 +04:00
/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes ( struct dw_mci * host , void * buf , int cnt )
{
2015-08-03 10:07:21 +03:00
cnt = min_t ( int , cnt , host - > part_buf_count ) ;
2011-06-24 16:57:56 +04:00
if ( cnt ) {
memcpy ( buf , ( void * ) & host - > part_buf + host - > part_buf_start ,
cnt ) ;
host - > part_buf_count - = cnt ;
host - > part_buf_start + = cnt ;
2011-01-02 09:11:59 +03:00
}
2011-06-24 16:57:56 +04:00
return cnt ;
2011-01-02 09:11:59 +03:00
}
2011-06-24 16:57:56 +04:00
/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes ( struct dw_mci * host , void * buf , int cnt )
2011-01-02 09:11:59 +03:00
{
2011-06-24 16:57:56 +04:00
memcpy ( buf , & host - > part_buf , cnt ) ;
host - > part_buf_start = cnt ;
host - > part_buf_count = ( 1 < < host - > data_shift ) - cnt ;
}
2011-01-02 09:11:59 +03:00
2011-06-24 16:57:56 +04:00
/*
 * PIO write path for a 16-bit wide FIFO: push @cnt bytes from @buf.
 *
 * Bytes that don't fill a whole FIFO halfword are staged in
 * host->part_buf across calls, and flushed early if this chunk completes
 * the transfer.  Unaligned buffers are bounced through a stack buffer on
 * architectures without efficient unaligned access.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}
2011-01-02 09:11:59 +03:00
2011-06-24 16:57:56 +04:00
/*
 * PIO read path for a 16-bit wide FIFO: pull @cnt bytes into @buf.
 *
 * A trailing odd byte is kept in host->part_buf for the next call.
 * Unaligned destinations are bounced through a stack buffer on
 * architectures without efficient unaligned access.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* Stash the last FIFO word; give the caller its first bytes. */
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/*
 * Write @cnt bytes from @buf into the data FIFO using 32-bit FIFO accesses.
 *
 * Bytes previously parked in host->part_buf32 are completed and flushed
 * first.  Misaligned source buffers are staged through an aligned bounce
 * buffer (unless the arch handles unaligned access efficiently).  A
 * trailing partial word is kept in part_buf32 and only pushed when it
 * finishes the expected transfer length.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;

			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}
/*
 * Read @cnt bytes from the data FIFO into @buf using 32-bit FIFO accesses.
 * Same structure as dw_mci_pull_data16: bounce buffer for misaligned
 * destinations, trailing partial word parked in host->part_buf32.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* 1-3 bytes left: read one more word, keep the remainder */
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/*
 * Write @cnt bytes from @buf into the data FIFO using 64-bit FIFO accesses.
 * Same structure as dw_mci_push_data32, with an 8-byte FIFO word and the
 * partial word kept in host->part_buf.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;

			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
/*
 * Read @cnt bytes from the data FIFO into @buf using 64-bit FIFO accesses.
 * Same structure as dw_mci_pull_data32, with an 8-byte FIFO word and the
 * partial word parked in host->part_buf.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		/* 1-7 bytes left: read one more word, keep the remainder */
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
2011-01-02 09:11:59 +03:00
2011-06-24 16:57:56 +04:00
static void dw_mci_pull_data ( struct dw_mci * host , void * buf , int cnt )
{
int len ;
2011-01-02 09:11:59 +03:00
2011-06-24 16:57:56 +04:00
/* get remaining partial bytes */
len = dw_mci_pull_part_bytes ( host , buf , cnt ) ;
if ( unlikely ( len = = cnt ) )
return ;
buf + = len ;
cnt - = len ;
/* get the rest of the data */
host - > pull_data ( host , buf , cnt ) ;
2011-01-02 09:11:59 +03:00
}
2013-01-22 11:46:30 +04:00
/*
 * Drain the controller FIFO into the request's scatterlist in PIO mode.
 *
 * @dto: true when called from the Data Transfer Over path; then we also
 *       keep reading while the FIFO still reports a non-zero fill count.
 *
 * When the scatterlist is exhausted the transfer is marked complete
 * (EVENT_XFER_COMPLETE) and host->sg is cleared.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available: FIFO fill count (in words,
			 * scaled by data_shift) plus parked partial bytes */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* current segment done: advance so the next IRQ resumes cleanly */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
/*
 * Fill the controller FIFO from the request's scatterlist in PIO mode.
 * Writes as many bytes as the FIFO has free space for, looping while the
 * TXDR interrupt remains asserted.  When the scatterlist is exhausted the
 * transfer is marked complete (EVENT_XFER_COMPLETE) and host->sg cleared.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO space in bytes, minus bytes already
			 * parked in the partial-word buffer */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* current segment done: advance so the next IRQ resumes cleanly */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
/*
 * Latch a command-phase interrupt status and kick the state-machine
 * tasklet.  Only the first status is recorded so an earlier error cause
 * is never overwritten.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (host->cmd_status == 0)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
2014-10-14 20:33:09 +04:00
static void dw_mci_handle_cd ( struct dw_mci * host )
{
int i ;
for ( i = 0 ; i < host - > num_slots ; i + + ) {
struct dw_mci_slot * slot = host - > slot [ i ] ;
if ( ! slot )
continue ;
if ( slot - > mmc - > ops - > card_event )
slot - > mmc - > ops - > card_event ( slot - > mmc ) ;
mmc_detect_change ( slot - > mmc ,
msecs_to_jiffies ( host - > pdata - > detect_delay_ms ) ) ;
}
}
2011-01-02 09:11:59 +03:00
/*
 * Top-level interrupt handler.  Reads the masked interrupt status, acks
 * each source in RINTSTS as it is handled, and dispatches: voltage-switch
 * (checked first, since it can look like an error), command/data errors,
 * data-over, PIO RX/TX drain, command-done, card-detect and per-slot SDIO
 * interrupts; finally the internal DMA controller's status, if compiled in.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			/* don't clobber an error status recorded above */
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain whatever is still in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	}
#endif

	return IRQ_HANDLED;
}
2012-09-17 22:16:40 +04:00
# ifdef CONFIG_OF
2015-05-06 21:31:22 +03:00
/* given a slot, find out the device node representing that slot */
static struct device_node * dw_mci_of_find_slot_node ( struct dw_mci_slot * slot )
2012-09-17 22:16:40 +04:00
{
2015-05-06 21:31:22 +03:00
struct device * dev = slot - > mmc - > parent ;
2012-09-17 22:16:40 +04:00
struct device_node * np ;
const __be32 * addr ;
int len ;
if ( ! dev | | ! dev - > of_node )
return NULL ;
for_each_child_of_node ( dev - > of_node , np ) {
addr = of_get_property ( np , " reg " , & len ) ;
if ( ! addr | | ( len < sizeof ( int ) ) )
continue ;
2015-05-06 21:31:22 +03:00
if ( be32_to_cpup ( addr ) = = slot - > id )
2012-09-17 22:16:40 +04:00
return np ;
}
return NULL ;
}
2015-05-06 21:31:22 +03:00
static void dw_mci_slot_of_parse ( struct dw_mci_slot * slot )
2013-01-11 21:03:50 +04:00
{
2015-05-06 21:31:22 +03:00
struct device_node * np = dw_mci_of_find_slot_node ( slot ) ;
2013-01-11 21:03:50 +04:00
2015-05-06 21:31:22 +03:00
if ( ! np )
return ;
2013-01-11 21:03:50 +04:00
2015-05-06 21:31:22 +03:00
if ( of_property_read_bool ( np , " disable-wp " ) ) {
slot - > mmc - > caps2 | = MMC_CAP2_NO_WRITE_PROTECT ;
dev_warn ( slot - > mmc - > parent ,
" Slot quirk 'disable-wp' is deprecated \n " ) ;
}
2013-01-11 21:03:50 +04:00
}
2012-09-17 22:16:40 +04:00
# else /* CONFIG_OF */
2015-05-06 21:31:22 +03:00
/* Stub when CONFIG_OF is disabled: no device tree, nothing to parse. */
static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
}
2012-09-17 22:16:40 +04:00
# endif /* CONFIG_OF */
2012-08-23 15:31:48 +04:00
/*
 * Allocate and register one mmc_host for slot @id.
 *
 * Sets up the host ops, clock frequency limits (DT "clock-freq-min-max"
 * or driver defaults), regulators, capabilities (platform data, per-
 * controller drv_data caps indexed by the "mshc" alias or platform id,
 * per-slot DT quirks, generic mmc_of_parse), block-layer transfer limits
 * and the initial card-present flag, then registers the host.
 *
 * Returns 0 on success or a negative errno; the mmc host is freed on
 * every error path.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	/* pick the per-controller caps entry: DT alias id, else platform id */
	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	dw_mci_slot_of_parse(slot);

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
		if (host->use_dma) {
			mmc->max_segs = host->ring_size;
			mmc->max_blk_size = 65536;
			mmc->max_seg_size = 0x1000;
			mmc->max_req_size = mmc->max_seg_size * host->ring_size;
			mmc->max_blk_count = mmc->max_req_size / 512;
		} else {
			mmc->max_segs = 64;
			mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
			mmc->max_blk_count = 512;
			mmc->max_req_size = mmc->max_blk_size *
					    mmc->max_blk_count;
			mmc->max_seg_size = mmc->max_req_size;
		}
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
static void dw_mci_cleanup_slot ( struct dw_mci_slot * slot , unsigned int id )
{
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host ( slot - > mmc ) ;
slot - > host - > slot [ id ] = NULL ;
mmc_free_host ( slot - > mmc ) ;
}
/*
 * Probe DMA capability and select a DMA interface for the host.
 *
 * Reads HCON bit 27 (ADDR_CONFIG) to choose between 32- and 64-bit IDMAC
 * addressing, allocates one page for sg descriptor translation, and
 * validates/initializes the DMA ops.  On any failure, falls back to PIO
 * mode by clearing host->use_dma.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;

	/* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
	addr_config = (mci_readl(host, HCON) >> 27) & 0x01;

	if (addr_config == 1) {
		/* host supports IDMAC in 64-bit address mode */
		host->dma_64bit_address = 1;
		dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
		if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
			dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
	} else {
		/* host supports IDMAC in 32-bit address mode */
		host->dma_64bit_address = 0;
		dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
	}

	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	/* all four ops must be present before we trust the interface */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
}
2013-08-30 19:14:23 +04:00
/*
 * Set the requested reset bit(s) in CTRL and poll (up to 500 ms) for the
 * hardware to self-clear them.  Returns true on success, false (with an
 * error log) on timeout.
 */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	mci_writel(host, CTRL, mci_readl(host, CTRL) | reset);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, deadline));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
2014-08-05 05:19:50 +04:00
/*
 * Full controller reset: CIU + FIFO (+ DMA when in use).  After a
 * successful CTRL reset we clear raw interrupts, wait for any in-flight
 * DMA request to drain, and reset the FIFO once more; optionally the
 * IDMAC is also reset.  In every case we finish by asking the CIU to
 * reload its clock registers.  Returns true on success.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Reseting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
2015-03-10 02:18:21 +03:00
static void dw_mci_cmd11_timer ( unsigned long arg )
{
struct dw_mci * host = ( struct dw_mci * ) arg ;
2015-04-03 21:13:06 +03:00
if ( host - > state ! = STATE_SENDING_CMD11 ) {
dev_warn ( host - > dev , " Unexpected CMD11 timeout \n " ) ;
return ;
}
2015-03-10 02:18:21 +03:00
host - > cmd_status = SDMMC_INT_RTO ;
set_bit ( EVENT_CMD_COMPLETE , & host - > pending_events ) ;
tasklet_schedule ( & host - > tasklet ) ;
}
2012-09-17 22:16:40 +04:00
# ifdef CONFIG_OF
/* DT quirk property names mapped to DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};

/*
 * Build a dw_mci_board from the host's device-tree node: slot count,
 * quirk flags, FIFO depth, card-detect delay and bus clock frequency,
 * plus any per-controller drv_data parsing and the deprecated
 * "supports-highspeed" capability.  Returns the pdata or an ERR_PTR.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
# else /* CONFIG_OF */
/* Stub when CONFIG_OF is disabled: probing without platform data fails. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
# endif /* CONFIG_OF */
2015-02-25 21:11:51 +03:00
/*
 * Unmask the controller's card-detect interrupt, but only when at least
 * one slot actually needs it: skipped entirely with the broken-cd quirk,
 * or when every slot has a working card-detect GPIO of its own.
 */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	struct dw_mci_board *brd = host->pdata;
	unsigned long irqflags;
	u32 temp;
	int i;

	/* No need for CD if broken card detection */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		return;

	/* No need for CD if all slots have a non-error GPIO */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
			break;
	}
	if (i == host->num_slots)
		return;

	/* irq_lock guards the INTMASK read-modify-write */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
2012-01-13 14:34:57 +04:00
int dw_mci_probe ( struct dw_mci * host )
2011-01-02 09:11:59 +03:00
{
2012-11-08 18:26:11 +04:00
const struct dw_mci_drv_data * drv_data = host - > drv_data ;
2012-01-13 14:34:57 +04:00
int width , i , ret = 0 ;
2011-01-02 09:11:59 +03:00
u32 fifo_size ;
2012-09-17 22:16:37 +04:00
int init_slots = 0 ;
2011-01-02 09:11:59 +03:00
2012-09-17 22:16:40 +04:00
if ( ! host - > pdata ) {
host - > pdata = dw_mci_parse_dt ( host ) ;
if ( IS_ERR ( host - > pdata ) ) {
dev_err ( host - > dev , " platform data not available \n " ) ;
return - EINVAL ;
}
2011-01-02 09:11:59 +03:00
}
2015-08-06 10:23:24 +03:00
if ( host - > pdata - > num_slots < 1 ) {
2012-09-17 22:16:35 +04:00
dev_err ( host - > dev ,
2014-03-03 06:36:43 +04:00
" Platform data must supply num_slots. \n " ) ;
2012-01-13 14:34:57 +04:00
return - ENODEV ;
2011-01-02 09:11:59 +03:00
}
2012-11-28 14:26:03 +04:00
host - > biu_clk = devm_clk_get ( host - > dev , " biu " ) ;
2012-09-17 22:16:38 +04:00
if ( IS_ERR ( host - > biu_clk ) ) {
dev_dbg ( host - > dev , " biu clock not available \n " ) ;
} else {
ret = clk_prepare_enable ( host - > biu_clk ) ;
if ( ret ) {
dev_err ( host - > dev , " failed to enable biu clock \n " ) ;
return ret ;
}
}
2012-11-28 14:26:03 +04:00
host - > ciu_clk = devm_clk_get ( host - > dev , " ciu " ) ;
2012-09-17 22:16:38 +04:00
if ( IS_ERR ( host - > ciu_clk ) ) {
dev_dbg ( host - > dev , " ciu clock not available \n " ) ;
2013-06-07 21:28:30 +04:00
host - > bus_hz = host - > pdata - > bus_hz ;
2012-09-17 22:16:38 +04:00
} else {
ret = clk_prepare_enable ( host - > ciu_clk ) ;
if ( ret ) {
dev_err ( host - > dev , " failed to enable ciu clock \n " ) ;
goto err_clk_biu ;
}
2013-06-07 21:28:30 +04:00
if ( host - > pdata - > bus_hz ) {
ret = clk_set_rate ( host - > ciu_clk , host - > pdata - > bus_hz ) ;
if ( ret )
dev_warn ( host - > dev ,
2014-03-03 06:36:42 +04:00
" Unable to set bus rate to %uHz \n " ,
2013-06-07 21:28:30 +04:00
host - > pdata - > bus_hz ) ;
}
2012-09-17 22:16:38 +04:00
host - > bus_hz = clk_get_rate ( host - > ciu_clk ) ;
2013-06-07 21:28:30 +04:00
}
2012-09-17 22:16:38 +04:00
2014-03-03 06:36:42 +04:00
if ( ! host - > bus_hz ) {
dev_err ( host - > dev ,
" Platform data must supply bus speed \n " ) ;
ret = - ENODEV ;
goto err_clk_ciu ;
}
2013-08-30 19:12:19 +04:00
if ( drv_data & & drv_data - > init ) {
ret = drv_data - > init ( host ) ;
if ( ret ) {
dev_err ( host - > dev ,
" implementation specific init failed \n " ) ;
goto err_clk_ciu ;
}
}
2012-10-16 12:43:08 +04:00
if ( drv_data & & drv_data - > setup_clock ) {
ret = drv_data - > setup_clock ( host ) ;
2012-09-17 22:16:42 +04:00
if ( ret ) {
dev_err ( host - > dev ,
" implementation specific clock setup failed \n " ) ;
goto err_clk_ciu ;
}
}
2015-03-10 02:18:21 +03:00
setup_timer ( & host - > cmd11_timer ,
dw_mci_cmd11_timer , ( unsigned long ) host ) ;
2012-01-13 14:34:57 +04:00
host - > quirks = host - > pdata - > quirks ;
2011-01-02 09:11:59 +03:00
spin_lock_init ( & host - > lock ) ;
2014-12-03 02:42:47 +03:00
spin_lock_init ( & host - > irq_lock ) ;
2011-01-02 09:11:59 +03:00
INIT_LIST_HEAD ( & host - > queue ) ;
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values .
*/
i = ( mci_readl ( host , HCON ) > > 7 ) & 0x7 ;
if ( ! i ) {
host - > push_data = dw_mci_push_data16 ;
host - > pull_data = dw_mci_pull_data16 ;
width = 16 ;
host - > data_shift = 1 ;
} else if ( i = = 2 ) {
host - > push_data = dw_mci_push_data64 ;
host - > pull_data = dw_mci_pull_data64 ;
width = 64 ;
host - > data_shift = 3 ;
} else {
/* Check for a reserved value, and warn if it is */
WARN ( ( i ! = 1 ) ,
" HCON reports a reserved host data width! \n "
" Defaulting to 32-bit access. \n " ) ;
host - > push_data = dw_mci_push_data32 ;
host - > pull_data = dw_mci_pull_data32 ;
width = 32 ;
host - > data_shift = 2 ;
}
/* Reset all blocks */
2014-08-05 05:19:50 +04:00
if ( ! dw_mci_ctrl_reset ( host , SDMMC_CTRL_ALL_RESET_FLAGS ) )
2012-05-22 08:01:03 +04:00
return - ENODEV ;
host - > dma_ops = host - > pdata - > dma_ops ;
dw_mci_init_dma ( host ) ;
2011-01-02 09:11:59 +03:00
/* Clear the interrupts for the host controller */
mci_writel ( host , RINTSTS , 0xFFFFFFFF ) ;
mci_writel ( host , INTMASK , 0 ) ; /* disable all mmc interrupt first */
/* Put in max timeout */
mci_writel ( host , TMOUT , 0xFFFFFFFF ) ;
/*
* FIFO threshold settings RxMark = fifo_size / 2 - 1 ,
* Tx Mark = fifo_size / 2 DMA Size = 8
*/
2011-06-24 16:57:18 +04:00
if ( ! host - > pdata - > fifo_depth ) {
/*
* Power - on value of RX_WMark is FIFO_DEPTH - 1 , but this may
* have been overwritten by the bootloader , just like we ' re
* about to do , so if you know the value for your hardware , you
* should put it in the platform data .
*/
fifo_size = mci_readl ( host , FIFOTH ) ;
2012-01-11 13:28:21 +04:00
fifo_size = 1 + ( ( fifo_size > > 16 ) & 0xfff ) ;
2011-06-24 16:57:18 +04:00
} else {
fifo_size = host - > pdata - > fifo_depth ;
}
host - > fifo_depth = fifo_size ;
2013-08-30 19:13:42 +04:00
host - > fifoth_val =
SDMMC_SET_FIFOTH ( 0x2 , fifo_size / 2 - 1 , fifo_size / 2 ) ;
2011-03-17 14:32:33 +03:00
mci_writel ( host , FIFOTH , host - > fifoth_val ) ;
2011-01-02 09:11:59 +03:00
/* disable clock to CIU */
mci_writel ( host , CLKENA , 0 ) ;
mci_writel ( host , CLKSRC , 0 ) ;
2013-03-12 14:43:54 +04:00
/*
* In 2.40 a spec , Data offset is changed .
* Need to check the version - id and set data - offset for DATA register .
*/
host - > verid = SDMMC_GET_VERID ( mci_readl ( host , VERID ) ) ;
dev_info ( host - > dev , " Version ID is %04x \n " , host - > verid ) ;
if ( host - > verid < DW_MMC_240A )
2015-03-25 14:27:52 +03:00
host - > fifo_reg = host - > regs + DATA_OFFSET ;
2013-03-12 14:43:54 +04:00
else
2015-03-25 14:27:52 +03:00
host - > fifo_reg = host - > regs + DATA_240A_OFFSET ;
2013-03-12 14:43:54 +04:00
2011-01-02 09:11:59 +03:00
tasklet_init ( & host - > tasklet , dw_mci_tasklet_func , ( unsigned long ) host ) ;
2012-11-28 14:26:03 +04:00
ret = devm_request_irq ( host - > dev , host - > irq , dw_mci_interrupt ,
host - > irq_flags , " dw-mci " , host ) ;
2011-01-02 09:11:59 +03:00
if ( ret )
2014-10-14 20:33:09 +04:00
goto err_dmaunmap ;
2011-01-02 09:11:59 +03:00
if ( host - > pdata - > num_slots )
host - > num_slots = host - > pdata - > num_slots ;
else
host - > num_slots = ( ( mci_readl ( host , HCON ) > > 1 ) & 0x1F ) + 1 ;
2012-10-08 12:59:51 +04:00
/*
2015-02-25 21:11:51 +03:00
* Enable interrupts for command done , data over , data empty ,
2012-10-08 12:59:51 +04:00
* receive ready and error such as transmit , receive timeout , crc error
*/
mci_writel ( host , RINTSTS , 0xFFFFFFFF ) ;
mci_writel ( host , INTMASK , SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2015-02-25 21:11:51 +03:00
DW_MCI_ERROR_FLAGS ) ;
2015-08-03 10:07:21 +03:00
/* Enable mci interrupt */
mci_writel ( host , CTRL , SDMMC_CTRL_INT_ENABLE ) ;
2012-10-08 12:59:51 +04:00
2015-08-03 10:07:21 +03:00
dev_info ( host - > dev ,
" DW MMC controller at irq %d,%d bit host data width,%u deep fifo \n " ,
2012-10-08 12:59:51 +04:00
host - > irq , width , fifo_size ) ;
2011-01-02 09:11:59 +03:00
/* We need at least one slot to succeed */
for ( i = 0 ; i < host - > num_slots ; i + + ) {
ret = dw_mci_init_slot ( host , i ) ;
2012-09-17 22:16:37 +04:00
if ( ret )
dev_dbg ( host - > dev , " slot %d init failed \n " , i ) ;
else
init_slots + + ;
}
if ( init_slots ) {
dev_info ( host - > dev , " %d slots initialized \n " , init_slots ) ;
} else {
2015-08-03 10:07:21 +03:00
dev_dbg ( host - > dev ,
" attempted to initialize %d slots, but failed on all \n " ,
host - > num_slots ) ;
2014-10-14 20:33:09 +04:00
goto err_dmaunmap ;
2011-01-02 09:11:59 +03:00
}
2015-03-12 01:15:14 +03:00
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd ( host ) ;
2011-01-02 09:11:59 +03:00
if ( host - > quirks & DW_MCI_QUIRK_IDMAC_DTO )
2012-09-17 22:16:35 +04:00
dev_info ( host - > dev , " Internal DMAC interrupt fix enabled. \n " ) ;
2011-01-02 09:11:59 +03:00
return 0 ;
err_dmaunmap :
if ( host - > use_dma & & host - > dma_ops - > exit )
host - > dma_ops - > exit ( host ) ;
2012-09-17 22:16:38 +04:00
err_clk_ciu :
2012-11-28 14:26:03 +04:00
if ( ! IS_ERR ( host - > ciu_clk ) )
2012-09-17 22:16:38 +04:00
clk_disable_unprepare ( host - > ciu_clk ) ;
2012-11-28 14:26:03 +04:00
2012-09-17 22:16:38 +04:00
err_clk_biu :
2012-11-28 14:26:03 +04:00
if ( ! IS_ERR ( host - > biu_clk ) )
2012-09-17 22:16:38 +04:00
clk_disable_unprepare ( host - > biu_clk ) ;
2012-11-28 14:26:03 +04:00
2011-01-02 09:11:59 +03:00
return ret ;
}
2012-01-13 14:34:57 +04:00
EXPORT_SYMBOL ( dw_mci_probe ) ;
2011-01-02 09:11:59 +03:00
2012-01-13 14:34:57 +04:00
/*
 * Tear down a host previously set up by dw_mci_probe(): clean up every
 * initialized slot, mask and clear all controller interrupts, gate the
 * card clock, shut down DMA and disable the biu/ciu clocks.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* Quiesce interrupts before touching clocks. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	/* Clocks may legitimately be absent (see probe); only undo real ones. */
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2011-01-02 09:11:59 +03:00
2011-12-08 14:23:03 +04:00
# ifdef CONFIG_PM_SLEEP
2011-01-02 09:11:59 +03:00
/*
* TODO : we should probably disable the clock to the card in the suspend path .
*/
2012-01-13 14:34:57 +04:00
/*
 * Suspend hook: currently a no-op.  All register state is reprogrammed by
 * dw_mci_resume(); per the TODO above, gating the card clock here is still
 * outstanding work.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
2011-01-02 09:11:59 +03:00
2012-01-13 14:34:57 +04:00
/*
 * Resume hook: the controller loses its register state across suspend, so
 * repeat the probe-time hardware setup - reset, DMA init, FIFO thresholds,
 * max timeout and interrupt mask - then restore the bus configuration of
 * every slot that kept power, and finally re-enable card detect.
 *
 * Returns 0 on success, -ENODEV if the controller fails to reset.
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	/* If the controller will not come out of reset, give up. */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Re-arm the same interrupt set that probe configured. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		/* Slots that kept power need their bus settings reapplied. */
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
2011-12-08 14:23:03 +04:00
# endif /* CONFIG_PM_SLEEP */
2011-01-02 09:11:59 +03:00
/*
 * Module init: nothing is registered here.  dw_mci_probe()/dw_mci_remove()
 * are exported above for the bus glue drivers that bind actual devices.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");

	return 0;
}
/* Module exit: intentionally empty - the init hook registers nothing. */
static void __exit dw_mci_exit(void)
{
}
module_init ( dw_mci_init ) ;
module_exit ( dw_mci_exit ) ;
MODULE_DESCRIPTION ( " DW Multimedia Card Interface driver " ) ;
MODULE_AUTHOR ( " NXP Semiconductor VietNam " ) ;
MODULE_AUTHOR ( " Imagination Technologies Ltd " ) ;
MODULE_LICENSE ( " GPL v2 " ) ;