/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC/SD/SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */
# include <linux/module.h>
# include <linux/irq.h>
# include <linux/device.h>
# include <linux/delay.h>
2010-05-19 22:34:22 +04:00
# include <linux/dmaengine.h>
2008-07-15 19:02:21 +04:00
# include <linux/mmc/host.h>
# include <linux/mfd/core.h>
# include <linux/mfd/tmio.h>
# include "tmio_mmc.h"
/*
 * Program the SD card clock divider for the requested frequency.
 *
 * Walks the divider select bit right while doubling 'clock' until the next
 * doubling would exceed new_clock, i.e. picks the fastest divided clock not
 * above the target.  A new_clock of 0 leaves clk == 0 (clock disabled).
 */
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		/* 0x80000080 seeds the divider select bits; see the
		 * (clk >> 22) extraction below.  Exact bit layout is per the
		 * chip datasheet - not visible here. */
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		/* 0x100 is the card clock enable bit - the same bit that
		 * tmio_mmc_clk_start()/clk_stop() toggle. */
		clk |= 0x100;
	}

	/* Hand one divider bit to the platform, if it wants it (meaning of
	 * bit 22 is platform-defined - TODO confirm against set_clk_div
	 * implementations). */
	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
/*
 * Stop the SD bus clock: gate the clock/wait control first, then clear the
 * card clock enable bit (0x0100).  The msleep()s let the hardware settle
 * between steps (delay values inherited from the original driver).
 */
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
/*
 * Start the SD bus clock: set the card clock enable bit (0x0100) first,
 * then ungate the clock/wait control - the reverse order of clk_stop().
 */
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
	msleep(10);
}
/*
 * Reset the SD and SDIO portions of the controller by pulsing both reset
 * registers low then high, with settle delays around the transition.
 */
static void reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void
tmio_mmc_finish_request ( struct tmio_mmc_host * host )
{
struct mmc_request * mrq = host - > mrq ;
host - > mrq = NULL ;
host - > cmd = NULL ;
host - > data = NULL ;
mmc_request_done ( host - > mmc , mrq ) ;
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
* types . Note that R1 and R6 are the same in this scheme . */
# define APP_CMD 0x0040
# define RESP_NONE 0x0300
# define RESP_R1 0x0400
# define RESP_R1B 0x0500
# define RESP_R2 0x0600
# define RESP_R3 0x0700
# define DATA_PRESENT 0x0800
# define TRANSFER_READ 0x1000
# define TRANSFER_MULTI 0x2000
# define SECURITY_CMD 0x4000
/*
 * Encode and issue a command to the controller.
 *
 * Builds the command register value from the opcode, the response-type
 * bits above and (if present) the data-phase flags, then writes the
 * argument and command registers.  CMD12 with arg 0 is special-cased:
 * the hardware generates it itself, so we only arm the auto-stop action.
 *
 * Returns 0 on success, -EINVAL for an unsupported response type.
 */
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 *	should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			/* Arm the hardware auto-CMD12 for multi-block. */
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	/* Unmask command-phase interrupts before firing the command. */
	enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
2010-05-19 22:34:22 +04:00
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesnt hose the controller.
 */
/* PIO data interrupt: move up to one block between the data port FIFO and
 * the current scatterlist entry, advancing to the next entry when the
 * current one is exhausted. */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	/* Atomically map the current sg entry and point past what has
	 * already been transferred. */
	buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
	      host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;	/* at most one block per interrupt */

	pr_debug("count: %08x offset: %08x flags %08x\n",
		count, host->sg_off, data->flags);

	/* Transfer the data - the port is 16 bits wide, hence count >> 1. */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host, &flags);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
2010-05-19 22:34:22 +04:00
/*
 * Finish the data phase of a request: record the transfer count, mask the
 * direction-specific PIO interrupts (unless DMA owns that direction),
 * clear the auto-CMD12 action if a stop command was attached, and
 * complete the request.  Called from the DATAEND IRQ path (PIO) or from
 * the DMA-complete tasklet.
 */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we dont do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (!host->chan_rx)
			disable_mmc_irqs(host, TMIO_MASK_READOP);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		if (!host->chan_tx)
			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		/* Only hardware auto-CMD12 is supported; any other stop
		 * command reaching here is a driver bug. */
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
2010-05-19 22:34:22 +04:00
/*
 * DATAEND interrupt: in DMA mode defer completion to the dma_complete
 * tasklet (for writes, only once the controller's BUSY bit has cleared);
 * in PIO mode finish the data phase directly.
 */
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;

	if (!data)
		return;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
	}
}
/*
 * Command-complete interrupt: read back and reorder the response words,
 * record timeout/CRC errors, then either arm the data phase (PIO IRQs or
 * the DMA issue tasklet) or finish the request if there is no data or the
 * command failed.
 */
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		return;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		/* Re-align the 136-bit response across the four words. */
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimatley finish the request in the data_end handler.
	 * If theres no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->chan_rx)
				enable_mmc_irqs(host, TMIO_MASK_READOP);
		} else {
			struct dma_chan *chan = host->chan_tx;
			if (!chan)
				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				/* Tx DMA was prepared earlier; issue it now
				 * that the command phase is done. */
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

	return;
}
/*
 * Top-level interrupt handler.  Reads status masked by the IRQ mask and
 * dispatches card-detect, command-complete, PIO-data and data-end events,
 * looping until no recognised event remains pending.  Unrecognised
 * spurious interrupts are masked off so they cannot storm.
 */
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, irq_mask, status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		/* Nothing we recognise fired - mask whatever did. */
		disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & TMIO_MASK_CMD) {
			ack_mmc_irqs(host, TMIO_MASK_CMD);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
2010-05-19 22:34:22 +04:00
# ifdef CONFIG_TMIO_MMC_DMA
/* Switch the controller's DMA request mode on or off.  Register 0xd8 is
 * only written on SuperH/SH-Mobile builds - its existence on other
 * variants is unconfirmed (hence the #if). */
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}
static void tmio_dma_complete ( void * arg )
{
struct tmio_mmc_host * host = arg ;
dev_dbg ( & host - > pdev - > dev , " Command completed \n " ) ;
if ( ! host - > data )
dev_warn ( & host - > pdev - > dev , " NULL data in DMA completion! \n " ) ;
else
enable_mmc_irqs ( host , TMIO_STAT_DATAEND ) ;
}
/*
 * Map the scatterlist and submit a device-to-memory slave DMA transfer.
 * The Rx descriptor is issued immediately (unlike Tx, which waits for the
 * command phase - see tmio_issue_tasklet_fn).  On any failure both DMA
 * channels are torn down, the controller is reset to PIO mode and the
 * current request is failed so upper layers can recover.
 *
 * Returns 0 on success or a negative errno.
 */
static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	int ret;

	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		host->desc = desc;
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		if (host->cookie < 0) {
			host->desc = NULL;
			ret = host->cookie;
		} else {
			chan->device->device_issue_pending(chan);
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, host->cookie, host->mrq);

	if (!host->desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
		reset(host);
		/* Fail this request, let above layers recover */
		host->mrq->cmd->error = ret;
		tmio_mmc_finish_request(host);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, host->cookie, host->sg_len);

	return ret > 0 ? 0 : ret;
}
/*
 * Map the scatterlist and submit a memory-to-device slave DMA transfer.
 * Unlike Rx, the descriptor is only submitted here; device_issue_pending()
 * is deferred to the dma_issue tasklet, which runs after the command
 * completes (see tmio_mmc_cmd_irq).  Error handling mirrors the Rx path:
 * release both channels, reset to PIO and fail the request.
 *
 * Returns 0 on success or a negative errno.
 */
static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	int ret;

	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		host->desc = desc;
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		if (host->cookie < 0) {
			host->desc = NULL;
			ret = host->cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, host->cookie, host->mrq);

	if (!host->desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
		reset(host);
		/* Fail this request, let above layers recover */
		host->mrq->cmd->error = ret;
		tmio_mmc_finish_request(host);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, host->cookie);

	return ret > 0 ? 0 : ret;
}
static int tmio_mmc_start_dma ( struct tmio_mmc_host * host ,
struct mmc_data * data )
{
if ( data - > flags & MMC_DATA_READ ) {
if ( host - > chan_rx )
return tmio_mmc_start_dma_rx ( host ) ;
} else {
if ( host - > chan_tx )
return tmio_mmc_start_dma_tx ( host ) ;
}
return 0 ;
}
static void tmio_issue_tasklet_fn ( unsigned long priv )
{
struct tmio_mmc_host * host = ( struct tmio_mmc_host * ) priv ;
struct dma_chan * chan = host - > chan_tx ;
chan - > device - > device_issue_pending ( chan ) ;
}
static void tmio_tasklet_fn ( unsigned long arg )
{
struct tmio_mmc_host * host = ( struct tmio_mmc_host * ) arg ;
if ( host - > data - > flags & MMC_DATA_READ )
dma_unmap_sg ( & host - > pdev - > dev , host - > sg_ptr , host - > dma_sglen ,
DMA_FROM_DEVICE ) ;
else
dma_unmap_sg ( & host - > pdev - > dev , host - > sg_ptr , host - > dma_sglen ,
DMA_TO_DEVICE ) ;
tmio_mmc_do_data_irq ( host ) ;
}
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter ( struct dma_chan * chan , void * arg )
{
dev_dbg ( chan - > device - > dev , " %s: slave data %p \n " , __func__ , arg ) ;
chan - > private = arg ;
return true ;
}
/*
 * Try to acquire the Tx/Rx DMA channel pair described by the platform
 * data.  DMA is all-or-nothing: if either channel is unavailable the
 * driver stays in PIO mode (both chan_tx and chan_rx left NULL).
 */
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	host->cookie = -EINVAL;
	host->desc = NULL;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);
	}
}
static void tmio_mmc_release_dma ( struct tmio_mmc_host * host )
{
if ( host - > chan_tx ) {
struct dma_chan * chan = host - > chan_tx ;
host - > chan_tx = NULL ;
dma_release_channel ( chan ) ;
}
if ( host - > chan_rx ) {
struct dma_chan * chan = host - > chan_rx ;
host - > chan_rx = NULL ;
dma_release_channel ( chan ) ;
}
host - > cookie = - EINVAL ;
host - > desc = NULL ;
}
# else
static int tmio_mmc_start_dma ( struct tmio_mmc_host * host ,
struct mmc_data * data )
{
return 0 ;
}
/* PIO-only build: mark both channels absent so all paths use PIO. */
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
	struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}
static void tmio_mmc_release_dma ( struct tmio_mmc_host * host )
{
}
# endif
2008-07-15 19:02:21 +04:00
/*
 * Prepare a data transfer: validate the block size, initialise the
 * scatterlist walk, program the length/count registers, then hand off to
 * DMA setup (a no-op in PIO mode).
 *
 * Returns 0 on success or a negative errno.
 */
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		data->blksz, data->blocks);

	/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
	if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		pr_err("%s: %d byte block unsupported in 4 bit mode\n",
		       mmc_hostname(host->mmc), data->blksz);
		return -EINVAL;
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	return tmio_mmc_start_dma(host, data);
}
/* Process requests from the MMC layer */
static void tmio_mmc_request ( struct mmc_host * mmc , struct mmc_request * mrq )
{
struct tmio_mmc_host * host = mmc_priv ( mmc ) ;
int ret ;
if ( host - > mrq )
pr_debug ( " request not null \n " ) ;
host - > mrq = mrq ;
if ( mrq - > data ) {
ret = tmio_mmc_start_data ( host , mrq - > data ) ;
if ( ret )
goto fail ;
}
ret = tmio_mmc_start_command ( host , mrq - > cmd ) ;
if ( ! ret )
return ;
fail :
mrq - > cmd - > error = ret ;
mmc_request_done ( mmc , mrq ) ;
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but as
 * MMC wont run that fast, it has to be clocked at 12 MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	/* Card option register: 0x80e0 selects 1-bit bus, 0x00e0 4-bit;
	 * remaining bit meanings are per the chip datasheet (not visible
	 * here). */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}
static int tmio_mmc_get_ro ( struct mmc_host * mmc )
{
struct tmio_mmc_host * host = mmc_priv ( mmc ) ;
2010-05-19 22:36:02 +04:00
struct mfd_cell * cell = host - > pdev - > dev . platform_data ;
struct tmio_mmc_data * pdata = cell - > driver_data ;
2008-07-15 19:02:21 +04:00
2010-05-19 22:36:02 +04:00
return ( ( pdata - > flags & TMIO_MMC_WRPROTECT_DISABLE ) | |
( sd_ctrl_read32 ( host , CTL_STATUS ) & TMIO_STAT_WRPROTECT ) ) ? 0 : 1 ;
2008-07-15 19:02:21 +04:00
}
2010-05-19 22:34:22 +04:00
/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
};
# ifdef CONFIG_PM
/*
 * Platform suspend hook: suspend the MMC host first, then let the MFD
 * core power the cell down (only if the host suspended cleanly).
 */
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc, state);

	/* Tell MFD core it can disable us now.*/
	if (!ret && cell->disable)
		cell->disable(dev);

	return ret;
}
/*
 * Platform resume hook: re-enable the MFD cell first, then resume the
 * MMC host.  If the cell fails to resume, the host is left suspended.
 */
static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->resume) {
		ret = cell->resume(dev);
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
# else
# define tmio_mmc_suspend NULL
# define tmio_mmc_resume NULL
# endif
/*
 * Probe: validate resources and platform data, allocate and populate the
 * mmc_host, map the control registers, enable the MFD cell, reset the
 * controller, hook the interrupt, optionally set up DMA and register the
 * host.  Errors unwind in reverse order via the goto chain at the bottom.
 */
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -EINVAL;
	u32 irq_mask = TMIO_MASK_CMD;

	/* Expect exactly one memory and one IRQ resource. */
	if (dev->num_resources != 2)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		goto out;

	pdata = cell->driver_data;
	if (!pdata || !pdata->hclk)
		goto out;

	ret = -ENOMEM;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = dev;
	platform_set_drvdata(dev, mmc);

	host->set_pwr = pdata->set_pwr;
	host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res_ctl) >> 10;

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->caps |= pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;	/* smallest divider setting */
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_ctl;
	}

	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto cell_disable;

	/* Keep everything masked until the host is registered. */
	disable_mmc_irqs(host, TMIO_MASK_ALL);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
	if (ret)
		goto cell_disable;

	/* See if we also get DMA */
	tmio_mmc_request_dma(host, pdata);

	mmc_add_host(mmc);

	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
		(unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about - PIO data IRQs are only
	 * needed for directions DMA does not cover. */
	if (!host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	enable_mmc_irqs(host, irq_mask);

	return 0;

cell_disable:
	if (cell->disable)
		cell->disable(dev);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}
/*
 * Remove: unwind probe in reverse - unregister the host, release DMA and
 * the IRQ, disable the MFD cell, unmap the registers and free the host.
 */
static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);
		mmc_remove_host(mmc);
		tmio_mmc_release_dma(host);
		free_irq(host->irq, host);
		if (cell->disable)
			cell->disable(dev);
		iounmap(host->ctl);
		mmc_free_host(mmc);
	}

	return 0;
}
/* ------------------- device registration ----------------------- */

/* Platform driver glue; matches devices named "tmio-mmc". */
static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};
/* Module entry point: register the platform driver. */
static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");