/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC/SD/SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0

/* Definitions for values the CTRL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND    0x00000001
#define TMIO_STAT_DATAEND       0x00000004
#define TMIO_STAT_CARD_REMOVE   0x00000008
#define TMIO_STAT_CARD_INSERT   0x00000010
#define TMIO_STAT_SIGSTATE      0x00000020
#define TMIO_STAT_WRPROTECT     0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A    0x00000400
#define TMIO_STAT_CMD_IDX_ERR   0x00010000
#define TMIO_STAT_CRCFAIL       0x00020000
#define TMIO_STAT_STOPBIT_ERR   0x00040000
#define TMIO_STAT_DATATIMEOUT   0x00080000
#define TMIO_STAT_RXOVERFLOW    0x00100000
#define TMIO_STAT_TXUNDERRUN    0x00200000
#define TMIO_STAT_CMDTIMEOUT    0x00400000
#define TMIO_STAT_RXRDY         0x01000000
#define TMIO_STAT_TXRQ          0x02000000
#define TMIO_STAT_ILL_FUNC      0x20000000
#define TMIO_STAT_CMD_BUSY      0x40000000
#define TMIO_STAT_ILL_ACCESS    0x80000000

/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ	0x0001
#define TMIO_SDIO_STAT_EXPUB52	0x4000
#define TMIO_SDIO_STAT_EXWT	0x8000
#define TMIO_SDIO_MASK_ALL	0xc007

/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL           0x837f031d
#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
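
/*
 * Note on CTL_IRQ_MASK semantics, as implied by the helpers below: a set
 * bit in the mask register suppresses the corresponding interrupt, so
 * "enable" clears bits and "disable" sets them.
 */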
#define enable_mmc_irqs(host, i) \
	do { \
		u32 mask; \
		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
		mask &= ~((i) & TMIO_MASK_IRQ); \
		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
	} while (0)

#define disable_mmc_irqs(host, i) \
	do { \
		u32 mask; \
		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
		mask |= ((i) & TMIO_MASK_IRQ); \
		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
	} while (0)

#define ack_mmc_irqs(host, i) \
	do { \
		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
	} while (0)

/* This is arbitrary, just no one has needed any higher alignment yet */
#define MAX_ALIGN 4

struct tmio_mmc_host {
	void __iomem *ctl;
	unsigned long bus_shift;
	struct mmc_command	*cmd;
	struct mmc_request	*mrq;
	struct mmc_data		*data;
	struct mmc_host		*mmc;
	int			irq;
	unsigned int		sdio_irq_enabled;

	/* Callbacks for clock / power control */
	void (*set_pwr)(struct platform_device *host, int state);
	void (*set_clk_div)(struct platform_device *host, int state);

	/* pio related stuff */
	struct scatterlist	*sg_ptr;
	struct scatterlist	*sg_orig;
	unsigned int		sg_len;
	unsigned int		sg_off;

	struct platform_device *pdev;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct tasklet_struct	dma_complete;
	struct tasklet_struct	dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
	struct scatterlist	bounce_sg;
#endif

	/* Track lost interrupts */
	struct delayed_work	delayed_reset_work;
	spinlock_t		lock;
	unsigned long		last_req_ts;
};

static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
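
/*
 * Register access helpers: the controller's registers are 16 bits wide;
 * bus_shift scales the byte offset for hosts that map each register on a
 * wider stride, and 32-bit values are composed from two consecutive
 * 16-bit halves (low half first).
 */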
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
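
/*
 * The PIO path can run in interrupt context, so these helpers use the
 * KM_BIO_SRC_IRQ kmap_atomic slot and disable local interrupts around the
 * mapping to keep use of that slot atomic.
 */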
static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}

static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
{
	kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
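
/*
 * Bit 0 of CTL_TRANSACTION_CTL gates SDIO card interrupt generation and
 * CTL_SDIO_IRQ_MASK masks the individual sources; the two are toggled
 * together below so that a disabled host cannot raise SDIO IRQs.
 */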
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;

		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = 0;
	}
}
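
/*
 * Divider selection: the low byte of clk is a one-hot divider select,
 * seeded at 0x80 (the slowest, /512 setting matching f_min) and shifted
 * right once for every doubling of the target clock; bit 8 (0x100)
 * enables the clock output, and bit 22, tested via set_clk_div(),
 * appears to request the undivided clock on hosts that support it.
 */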
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	/*
	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
	 * device IRQ here and restore the SDIO IRQ mask before
	 * re-enabling the device IRQ.
	 */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);

	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);

	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* see comment in tmio_mmc_clk_stop above */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);

	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
	msleep(10);

	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
}

static void reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
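
/*
 * Watchdog for lost interrupts: tmio_mmc_request() schedules this work
 * 2 s into every request and tmio_mmc_finish_request() cancels it, so it
 * only runs when the controller has stopped signalling completion; it
 * then fails the request with -ETIMEDOUT and resets the controller.
 */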
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/* request already finished */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	reset(host);
	mmc_request_done(host->mmc, mrq);
}

static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
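
/*
 * CTL_SD_CMD thus encodes the command opcode in its low six bits, OR'd
 * with the response/transfer flags above; tmio_mmc_start_command()
 * assembles this value in `c' before firing the command.
 */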
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 *         should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (!host->chan_rx)
			disable_mmc_irqs(host, TMIO_MASK_READOP);
		else
			tmio_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		if (!host->chan_tx)
			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set. In these
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->chan_rx)
				enable_mmc_irqs(host, TMIO_MASK_READOP);
		} else {
			if (!host->chan_tx)
				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);

	return;
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			ack_mmc_irqs(host,
				     TMIO_STAT_CMDRESPEND |
				     TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}

#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}

static void tmio_dma_complete(void *arg)
{
	struct tmio_mmc_host *host = arg;

	dev_dbg(&host->pdev->dev, "Command completed\n");

	if (!host->data)
		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
	else
		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}
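
/*
 * DMA is only attempted when every sg element length is a multiple of the
 * DMA alignment and either all offsets are aligned too, or there is a
 * single element that fits the bounce buffer; anything else falls through
 * to the PIO fallback at the `pio' label.
 */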
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  align >= MAX_ALIGN)) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	/* A single sg element may be unaligned; use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
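
/* The Tx path mirrors the Rx path above, with the same alignment rules
 * and PIO fallback; the bounce buffer is additionally pre-filled from
 * the original sg element before mapping. */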
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  align >= MAX_ALIGN)) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	/* A single sg element may be unaligned; use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = host->chan_tx;

	dma_async_issue_pending(chan);
}

static void tmio_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);
	}
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
}
#else
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
}

static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	host->last_req_ts = jiffies;
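	/* Order the timestamp store before publishing host->mrq, so the
	 * delayed reset worker never sees a fresh request paired with a
	 * stale last_req_ts. */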
	wmb();
	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD,
 * but as MMC won't run that fast, it has to be clocked at 12 MHz, which
 * is the next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}
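
/* TMIO_STAT_WRPROTECT apparently reads as set while the card is
 * write-enabled, hence the inverted logic below: report read-only (1)
 * only when detection is enabled and the bit is clear. */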
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell *cell = mfd_get_cell(dev);
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc);

	/* Tell MFD core it can disable us now. */
	if (!ret && cell->disable)
		cell->disable(dev);

	return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell *cell = mfd_get_cell(dev);
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->resume) {
		ret = cell->resume(dev);
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif
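
/*
 * For reference, a minimal sketch of how a parent MFD driver might
 * describe this cell (field values are illustrative only; see
 * <linux/mfd/tmio.h> for the full set of options):
 *
 *	static struct tmio_mmc_data board_mmc_data = {
 *		.hclk     = 24000000,
 *		.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
 *	};
 *
 * probe() below pulls this structure back out with mfd_get_data() and
 * refuses to bind unless at least .hclk is set.
 */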
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	struct mfd_cell *cell = mfd_get_cell(dev);
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -EINVAL;
	u32 irq_mask = TMIO_MASK_CMD;

	if (dev->num_resources != 2)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		goto out;

	pdata = mfd_get_data(dev);
	if (!pdata || !pdata->hclk)
		goto out;

	ret = -ENOMEM;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = dev;
	platform_set_drvdata(dev, mmc);

	host->set_pwr = pdata->set_pwr;
	host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res_ctl) >> 10;

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_ctl;
	}

	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto cell_disable;

	disable_mmc_irqs(host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
	if (ret)
		goto cell_disable;

	spin_lock_init(&host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(host, pdata);

	mmc_add_host(mmc);

	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
		(unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about */
	if (!host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	enable_mmc_irqs(host, irq_mask);

	return 0;

cell_disable:
	if (cell->disable)
		cell->disable(dev);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}

static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
	struct mfd_cell *cell = mfd_get_cell(dev);
	struct mmc_host *mmc = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);
		mmc_remove_host(mmc);
		cancel_delayed_work_sync(&host->delayed_reset_work);
		tmio_mmc_release_dma(host);
		free_irq(host->irq, host);
		if (cell->disable)
			cell->disable(dev);
		iounmap(host->ctl);
		mmc_free_host(mmc);
	}

	return 0;
}

/* ------------------- device registration ----------------------- */

static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};

static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");