/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC/SD/SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4-bit
 * support). (Further 4-bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;

	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
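
/*
 * Example of the debug output produced above: a status word with, say,
 * CMDRESPEND and DATAEND asserted would be printed as
 *
 *	status: xxxxxxxx = CMDRESPEND | DATAEND
 *
 * (bit names appear in the fixed order of the STATUS_TO_TEXT() calls).
 */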
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
				unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
		     new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;

		/* 1/1 clock is an option */
		if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
		    ((clk >> 22) & 0x1))
			clk |= 0xff;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
	msleep(10);
}
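
/*
 * A note on the divider selection above (an observation from the code, not
 * from a datasheet): clk starts as 0x80000080, i.e. the largest divider
 * (low byte 0x80) plus a marker bit in the high word. Each loop iteration
 * doubles the candidate clock while shifting clk right, walking towards
 * smaller dividers. Since mmc->f_min is f_max / 512, nine doublings reach
 * f_max; at that point the low byte is 0 and bit 22, tested via
 * (clk >> 22) & 1, is set, which flags the 1/1 clock for set_clk_div()
 * and the TMIO_MMC_CLK_ACTUAL quirk.
 */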
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	/* implicit BUG_ON(!res) */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
/* Takes host->lock itself; do not call with host->lock held */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
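
/*
 * Example (derived from the flag handling in tmio_mmc_start_command()
 * below): a single block read, CMD17 with an R1 response, is issued as
 *
 *	c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11
 */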
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 *	   should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED when doing a
			 * multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8  *buf8;

	/*
	 * Transfer the data
	 */
	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number, we are done */
	if (!(count & 0x1))
		return;

	/* if count was an odd number, transfer the last byte */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * The driver and this function assume little-endian operation.
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
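
/*
 * Example: for count == 5 the *_rep() access above moves two 16-bit words
 * (count >> 1 == 2), and the trailing byte goes through the low half of
 * one more 16-bit data-port access, hence the little-endian assumption
 * noted in the FIXME.
 */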
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
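
/*
 * Presumably (the bounce buffer itself is set up by the DMA code) the
 * bounce_sg entry stands in for a scatterlist segment the DMA engine could
 * not use directly, so after a read the data has to be copied back into
 * the original segment, which is what the memcpy() above does.
 */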
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In this case,
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_ILL_FUNC)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
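	/*
	 * A worked example of the shuffle below (derived from the code, not
	 * from a datasheet): for a 136-bit R2 response each resp[i] drops
	 * its top byte via the << 8 and takes its missing low byte from the
	 * top byte of resp[i + 1]; resp[3] is simply shifted up by 8 bits.
	 */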
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				      int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				      int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				 int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			     TMIO_STAT_CMDRESPEND |
			     TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
		sdio_status |= 6;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_get_sync(mmc_dev(mmc));

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	int ret;

	if (!pdata->clk_enable)
		return -ENOTSUPP;

	ret = pdata->clk_enable(host->pdev, &mmc->f_max);
	if (!ret)
		mmc->f_min = mmc->f_max / 512;

	return ret;
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() returns void, so there is no way to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empirical value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				unsigned char bus_width)
{
	switch (bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD,
 * but as MMC won't run that fast, it has to be clocked at 12 MHz, which is
 * the next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);

	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);

	if (ret >= 0)
		return ret;

	pm_runtime_get_sync(mmc_dev(mmc));
	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return ret;
}
static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);
	struct tmio_mmc_data *pdata = host->pdata;

	if (pdata->multi_io_quirk)
		return pdata->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again later: it is possible that the regulator simply
	 * has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;
	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
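
/*
 * A device tree fragment carrying the property parsed above would look
 * like this (illustrative only; node name and unit address are made up):
 *
 *	mmc@10000000 {
 *		toshiba,mmc-wrprotect-disable;
 *	};
 */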
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
	struct platform_device *pdev,
	struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		pdata->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto host_free;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		goto host_free;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  mmc->caps & MMC_CAP_NONREMOVABLE ||
				  mmc->slot.cd_irq >= 0);

	if (tmio_mmc_clk_update(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0) {
		ret = -EINVAL;
		goto host_free;
	}

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
	}

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
		mmc_gpiod_request_cd_irq(mmc);
	}

	*host = _host;

	return 0;

host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
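
/*
 * A minimal sketch of the platform glue this probe expects (illustrative
 * only; the field values are assumptions, not taken from a real board):
 *
 *	static struct tmio_mmc_data board_mmc_data = {
 *		.hclk		= 24000000,
 *		.capabilities	= MMC_CAP_SD_HIGHSPEED,
 *		.flags		= TMIO_MMC_WRPROTECT_DISABLE,
 *	};
 *
 * The wrapper driver (e.g. an MFD cell) supplies one IORESOURCE_MEM
 * resource for the control registers, passes such a structure as pdata to
 * tmio_mmc_host_probe() from its own probe routine, and hooks up
 * tmio_mmc_irq() (or the split tmio_mmc_card_detect_irq() and
 * tmio_mmc_sdcard_irq() handlers) to its interrupt line.
 */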
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	if (host->pdata->clk_disable)
		host->pdata->clk_disable(host->pdev);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_update(host);

	if (host->clk_cache) {
		tmio_mmc_set_clock(host, host->clk_cache);
		tmio_mmc_clk_start(host);
	}

	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");