/*
 * drivers/ata/pata_arasan_cf.c
 *
 * Arasan Compact Flash host controller source file
 *
 * Copyright (C) 2011 ST Microelectronics
 * Viresh Kumar <vireshk@kernel.org>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/*
 * The Arasan CompactFlash Device Controller IP core has three basic modes of
 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
 * PC card ATA using True IDE mode. This driver currently supports only True
 * IDE mode.
 *
 * The Arasan CF controller shares its global IRQ register with the Arasan XD
 * controller.
 *
 * Tested on arch/arm/mach-spear13xx.
 */

#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <trace/events/libata.h>

#define DRIVER_NAME	"arasan_cf"
#define TIMEOUT		msecs_to_jiffies(3000)

/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS			0x000
#define STS_CHG			(1)
#define BIN_AUDIO_OUT		(1 << 1)
#define CARD_DETECT1		(1 << 2)
#define CARD_DETECT2		(1 << 3)
#define INP_ACK			(1 << 4)
#define CARD_READY		(1 << 5)
#define IO_READY		(1 << 6)
#define B16_IO_PORT_SEL		(1 << 7)
/* IRQ */
#define IRQ_STS			0x004
/* Interrupt Enable */
#define IRQ_EN			0x008
#define CARD_DETECT_IRQ		(1)
#define STATUS_CHNG_IRQ		(1 << 1)
#define MEM_MODE_IRQ		(1 << 2)
#define IO_MODE_IRQ		(1 << 3)
#define TRUE_IDE_MODE_IRQ	(1 << 8)
#define PIO_XFER_ERR_IRQ	(1 << 9)
#define BUF_AVAIL_IRQ		(1 << 10)
#define XFER_DONE_IRQ		(1 << 11)
#define IGNORED_IRQS		(STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ | \
				 TRUE_IDE_MODE_IRQ)
#define TRUE_IDE_IRQS		(CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ | \
				 BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE			0x00C
#define CARD_MODE_MASK		(0x3)
#define MEM_MODE		(0x0)
#define IO_MODE			(0x1)
#define TRUE_IDE_MODE		(0x2)
#define CARD_TYPE_MASK		(1 << 2)
#define CF_CARD			(0)
#define CF_PLUS_CARD		(1 << 2)
#define CARD_RESET		(1 << 3)
#define CFHOST_ENB		(1 << 4)
#define OUTPUTS_TRISTATE	(1 << 5)
#define ULTRA_DMA_ENB		(1 << 8)
#define MULTI_WORD_DMA_ENB	(1 << 9)
#define DRQ_BLOCK_SIZE_MASK	(0x3 << 11)
#define DRQ_BLOCK_SIZE_512	(0)
#define DRQ_BLOCK_SIZE_1024	(1 << 11)
#define DRQ_BLOCK_SIZE_2048	(2 << 11)
#define DRQ_BLOCK_SIZE_4096	(3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG			0x010
#define CF_IF_CLK_MASK		(0xF)
/* CF Timing Mode Configuration */
#define TM_CFG			0x014
#define MEM_MODE_TIMING_MASK	(0x3)
#define MEM_MODE_TIMING_250NS	(0x0)
#define MEM_MODE_TIMING_120NS	(0x1)
#define MEM_MODE_TIMING_100NS	(0x2)
#define MEM_MODE_TIMING_80NS	(0x3)
#define IO_MODE_TIMING_MASK	(0x3 << 2)
#define IO_MODE_TIMING_250NS	(0x0 << 2)
#define IO_MODE_TIMING_120NS	(0x1 << 2)
#define IO_MODE_TIMING_100NS	(0x2 << 2)
#define IO_MODE_TIMING_80NS	(0x3 << 2)
#define TRUEIDE_PIO_TIMING_MASK		(0x7 << 4)
#define TRUEIDE_PIO_TIMING_SHIFT	4
#define TRUEIDE_MWORD_DMA_TIMING_MASK	(0x7 << 7)
#define TRUEIDE_MWORD_DMA_TIMING_SHIFT	7
#define ULTRA_DMA_TIMING_MASK		(0x7 << 10)
#define ULTRA_DMA_TIMING_SHIFT		10
/* CF Transfer Address */
#define XFER_ADDR		0x014
#define XFER_ADDR_MASK		(0x7FF)
#define MAX_XFER_COUNT		0x20000u
/* Transfer Control */
#define XFER_CTR		0x01C
#define XFER_COUNT_MASK		(0x3FFFF)
#define ADDR_INC_DISABLE	(1 << 24)
#define XFER_WIDTH_MASK		(1 << 25)
#define XFER_WIDTH_8B		(0)
#define XFER_WIDTH_16B		(1 << 25)
#define MEM_TYPE_MASK		(1 << 26)
#define MEM_TYPE_COMMON		(0)
#define MEM_TYPE_ATTRIBUTE	(1 << 26)
#define MEM_IO_XFER_MASK	(1 << 27)
#define MEM_XFER		(0)
#define IO_XFER			(1 << 27)
#define DMA_XFER_MODE		(1 << 28)
#define AHB_BUS_NORMAL_PIO_OPRTN	(~(1 << 29))
#define XFER_DIR_MASK		(1 << 30)
#define XFER_READ		(0)
#define XFER_WRITE		(1 << 30)
#define XFER_START		(1 << 31)
/* Write Data Port */
#define WRITE_PORT		0x024
/* Read Data Port */
#define READ_PORT		0x028
/* ATA Data Port */
#define ATA_DATA_PORT		0x030
#define ATA_DATA_PORT_MASK	(0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR		0x034
/* ATA Sector Count */
#define ATA_SC			0x038
/* ATA Sector Number */
#define ATA_SN			0x03C
/* ATA Cylinder Low */
#define ATA_CL			0x040
/* ATA Cylinder High */
#define ATA_CH			0x044
/* ATA Select Card/Head */
#define ATA_SH			0x048
/* ATA Status-Command */
#define ATA_STS_CMD		0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR		0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT		0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT		0x400
#define FIFO_SIZE		0x200u
/* Global Interrupt Status */
#define GIRQ_STS		0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN		0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN		0x808
#define GIRQ_CF			(1)
#define GIRQ_XD			(1 << 1)
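
/*
 * Note on the layout above: the taskfile shadow registers (ATA_DATA_PORT
 * through ATA_ASTS_DCTR) are wired into ap->ioaddr at probe time, so libata's
 * SFF helpers drive the card directly. Bulk data, by contrast, moves through
 * the 512-byte (FIFO_SIZE) EXT_WRITE_PORT/EXT_READ_PORT FIFOs under control
 * of the XFER_CTR register, with an external DMA engine feeding or draining
 * the FIFO (see data_xfer() below).
 */
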
/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
	/* pointer to ata_host structure */
	struct ata_host *host;
	/* clk structure */
	struct clk *clk;

	/* physical base address of controller */
	dma_addr_t pbase;
	/* virtual base address of controller */
	void __iomem *vbase;
	/* irq number */
	int irq;

	/* DMA transfer status to be reported back to the framework */
	u8 dma_status;
	/* whether a card is currently present */
	u8 card_present;

	/* dma specific */
	/* Completion for transfer complete interrupt from controller */
	struct completion cf_completion;
	/* Completion for DMA transfer complete. */
	struct completion dma_completion;
	/* allocated DMA channel */
	struct dma_chan *dma_chan;
	/* Mask for DMA transfers */
	dma_cap_mask_t mask;
	/* DMA transfer work */
	struct work_struct work;
	/* DMA delayed finish work */
	struct delayed_work dwork;
	/* qc to be transferred using DMA */
	struct ata_queued_cmd *qc;
};

static struct scsi_host_template arasan_cf_sht = {
	ATA_BASE_SHT(DRIVER_NAME),
	.dma_boundary = 0xFFFFFFFFUL,
};

static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
	struct device *dev = acdev->host->dev;

	dev_dbg(dev, ": =========== REGISTER DUMP ===========");
	dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
	dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
	dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
	dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
	dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
	dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
	dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
	dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
	dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
	dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
	dev_dbg(dev, ": =====================================");
}

/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
	/* enable should be 0 or 1 */
	writel(enable, acdev->vbase + GIRQ_STS_EN);
	writel(enable, acdev->vbase + GIRQ_SGN_EN);
}

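/*
 * Note: cf_interrupt_enable() performs a read-modify-write of IRQ_EN, so all
 * of its callers in this driver hold host->lock around it. When enabling,
 * pending bits are acked first by writing the mask back to IRQ_STS, which is
 * write-one-to-clear (the same behaviour arasan_cf_interrupt() relies on to
 * clear status).
 */
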
/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
	u32 val = readl(acdev->vbase + IRQ_EN);

	/* clear & enable/disable irqs */
	if (enable) {
		writel(mask, acdev->vbase + IRQ_STS);
		writel(val | mask, acdev->vbase + IRQ_EN);
	} else
		writel(val & ~mask, acdev->vbase + IRQ_EN);
}

static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
	u32 val = readl(acdev->vbase + OP_MODE);

	writel(val | CARD_RESET, acdev->vbase + OP_MODE);
	udelay(200);
	writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}

static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
	       acdev->vbase + OP_MODE);
	writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
	       acdev->vbase + OP_MODE);
}

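/*
 * Card detect: CD1/CD2 are active-low pins, so the card is fully inserted
 * only when both read back as zero. A freshly inserted card gets a hardware
 * reset pulse via cf_card_reset(); on a hotplug event the port is frozen so
 * that libata EH revalidates the link.
 */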
static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
	struct ata_port *ap = acdev->host->ports[0];
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 val = readl(acdev->vbase + CFI_STS);

	/* Both CD1 & CD2 should be low if card inserted completely */
	if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
		if (acdev->card_present)
			return;
		acdev->card_present = 1;
		cf_card_reset(acdev);
	} else {
		if (!acdev->card_present)
			return;
		acdev->card_present = 0;
	}

	if (hotplugged) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	}
}

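/*
 * One-time controller bring-up: enable the input clock and set it to 166 MHz,
 * program the CF interface clock selection (CLK_CFG), switch the core into
 * True IDE mode with the host enabled, and arm the card-detect interrupt.
 * The reverse of cf_exit().
 */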
static int cf_init(struct arasan_cf_dev *acdev)
{
	struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
	unsigned int if_clk;
	unsigned long flags;
	int ret = 0;

	ret = clk_prepare_enable(acdev->clk);
	if (ret) {
		dev_dbg(acdev->host->dev, "clock enable failed");
		return ret;
	}

	ret = clk_set_rate(acdev->clk, 166000000);
	if (ret) {
		dev_warn(acdev->host->dev, "clock set rate failed");
		clk_disable_unprepare(acdev->clk);
		return ret;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	/* configure CF interface clock */
	/* TODO: read from device tree */
	if_clk = CF_IF_CLK_166M;
	if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
		if_clk = pdata->cf_if_clk;

	writel(if_clk, acdev->vbase + CLK_CFG);

	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
	cf_ginterrupt_enable(acdev, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

static void cf_exit(struct arasan_cf_dev *acdev)
{
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	cf_ginterrupt_enable(acdev, 0);
	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
	cf_card_reset(acdev);
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
	       acdev->vbase + OP_MODE);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
	clk_disable_unprepare(acdev->clk);
}

static void dma_callback(void *dev)
{
	struct arasan_cf_dev *acdev = dev;

	complete(&acdev->dma_completion);
}

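/*
 * Finish a DMA qc: acdev->qc is cleared before calling into
 * ata_sff_interrupt(), so the worker state no longer references the command;
 * any error is then recorded in the port's EH descriptor under host->lock.
 */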
static inline void dma_complete(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;

	acdev->qc = NULL;
	ata_sff_interrupt(acdev->irq, acdev->host);

	spin_lock_irqsave(&acdev->host->lock, flags);
	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

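/*
 * Wait for the controller's next buffer-available/transfer-done completion.
 * Returns -ETIMEDOUT if nothing arrives within TIMEOUT, -EAGAIN if the PIO
 * transfer error interrupt fired in the meantime (the IRQ handler sets
 * dma_status to ATA_DMA_ERR in that case), and 0 otherwise.
 */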
static inline int wait4buf(struct arasan_cf_dev *acdev)
{
	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
		return -ETIMEDOUT;
	}

	/* Check if PIO Error interrupt has occurred */
	if (acdev->dma_status & ATA_DMA_ERR)
		return -EAGAIN;

	return 0;
}

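/*
 * Issue one memcpy transaction on the external DMA channel, between system
 * memory and one of the FIFO data ports, and wait for its completion
 * callback. On timeout, all pending descriptors on the channel are
 * terminated.
 */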
static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan = acdev->dma_chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_PREP_INTERRUPT;
	int ret = 0;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!tx) {
		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
		return -EAGAIN;
	}

	tx->callback = dma_callback;
	tx->callback_param = acdev;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(acdev->host->dev, "dma_submit_error\n");
		return ret;
	}

	chan->device->device_issue_pending(chan);

	/* Wait for DMA to complete */
	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
		dmaengine_terminate_all(chan);
		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return ret;
}

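/*
 * Transfer a single scatterlist entry through the controller FIFO. The entry
 * is cut into XFER_CTR programs of at most MAX_XFER_COUNT bytes each, and
 * each program is in turn moved in FIFO_SIZE chunks, synchronized with the
 * controller via wait4buf().
 */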
static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
	dma_addr_t dest = 0, src = 0;
	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
	unsigned long flags;
	int ret = 0;

	sglen = sg_dma_len(sg);
	if (write) {
		src = sg_dma_address(sg);
		dest = acdev->pbase + EXT_WRITE_PORT;
	} else {
		dest = sg_dma_address(sg);
		src = acdev->pbase + EXT_READ_PORT;
	}

	/*
	 * For each sg: up to MAX_XFER_COUNT bytes are transferred before the
	 * transfer complete interrupt is raised. In between, a buffer
	 * available interrupt is generated after every FIFO_SIZE bytes, at
	 * which point we fill the FIFO again with at most FIFO_SIZE bytes.
	 */
	while (sglen) {
		xfer_cnt = min(sglen, MAX_XFER_COUNT);
		spin_lock_irqsave(&acdev->host->lock, flags);
		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
			~XFER_COUNT_MASK;
		writel(xfer_ctr | xfer_cnt | XFER_START,
		       acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);

		/* continue dma xfers until current sg is completed */
		while (xfer_cnt) {
			/* wait for read to complete */
			if (!write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}

			/* read/write FIFO in chunk of FIFO_SIZE */
			dma_len = min(xfer_cnt, FIFO_SIZE);
			ret = dma_xfer(acdev, src, dest, dma_len);
			if (ret) {
				dev_err(acdev->host->dev, "dma failed");
				goto fail;
			}

			if (write)
				src += dma_len;
			else
				dest += dma_len;

			sglen -= dma_len;
			xfer_cnt -= dma_len;

			/* wait for write to complete */
			if (write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}
		}
	}

fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
	       acdev->vbase + XFER_CTR);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

/*
 * This routine uses an external DMA controller to read/write data to/from the
 * FIFO of the CF controller. There are two transfer-related interrupts
 * supported by the CF controller:
 * - buf_avail: generated as soon as a 512-byte buffer is available for
 *   reading, or an empty buffer is available for writing.
 * - xfer_done: generated once "xfer_size" bytes have been transferred to/from
 *   the FIFO. xfer_size is programmed in the XFER_CTR register.
 *
 * Max buffer size = FIFO_SIZE = 512 Bytes.
 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
 */
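/*
 * Illustrative sequence for one DMA write (the read path mirrors it, with
 * wait4buf() called before each chunk instead of after):
 *
 *   XFER_CTR <= xfer_cnt | XFER_START
 *   repeat:
 *     dma_xfer(): memcpy of up to FIFO_SIZE bytes into EXT_WRITE_PORT
 *     wait4buf(): sleep until buf_avail (or, for the last chunk of a write,
 *                 xfer_done) completes cf_completion
 *   until xfer_cnt bytes have been moved
 */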
static void data_xfer(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			work);
	struct ata_queued_cmd *qc = acdev->qc;
	struct scatterlist *sg;
	unsigned long flags;
	u32 temp;
	int ret = 0;

	/* request dma channels */
	/* dma_request_channel may sleep, so calling from process context */
	acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
	if (IS_ERR(acdev->dma_chan)) {
		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
		acdev->dma_chan = NULL;
		goto chan_request_fail;
	}

	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
		ret = sg_xfer(acdev, sg);
		if (ret)
			break;
	}

	dma_release_channel(acdev->dma_chan);
	acdev->dma_chan = NULL;

	/* data xferred successfully */
	if (!ret) {
		u32 status;

		spin_lock_irqsave(&acdev->host->lock, flags);
		status = ioread8(qc->ap->ioaddr.altstatus_addr);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			ata_sff_queue_delayed_work(&acdev->dwork, 1);
			return;
		}

		goto sff_intr;
	}

	cf_dumpregs(acdev);

chan_request_fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	/* error when transferring data to/from memory */
	qc->err_mask |= AC_ERR_HOST_BUS;
	qc->ap->hsm_task_state = HSM_ST_ERR;

	cf_ctrl_reset(acdev);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
sff_intr:
	dma_complete(acdev);
}

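/*
 * Deferred completion: after the data has been moved, the device may still
 * report BSY/DRQ while it commits its internal buffer. Poll the alternate
 * status, rescheduling after a jiffy until both bits clear, then finish the
 * qc through dma_complete().
 */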
static void delayed_finish(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			dwork.work);
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&acdev->host->lock, flags);
	status = ioread8(qc->ap->ioaddr.altstatus_addr);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (status & (ATA_BUSY | ATA_DRQ))
		ata_sff_queue_delayed_work(&acdev->dwork, 1);
	else
		dma_complete(acdev);
}

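/*
 * Interrupt handler. The IRQ line is shared with the XD controller, so
 * GIRQ_STS is consulted first and the handler bails out with IRQ_NONE unless
 * the CF bit is set. Status bits are acked by writing them back; card-detect
 * and PIO-error events are handled here, while buf_avail/xfer_done events
 * wake the data_xfer() worker through cf_completion.
 */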
static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
	struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
	unsigned long flags;
	u32 irqsts;

	irqsts = readl(acdev->vbase + GIRQ_STS);
	if (!(irqsts & GIRQ_CF))
		return IRQ_NONE;

	spin_lock_irqsave(&acdev->host->lock, flags);
	irqsts = readl(acdev->vbase + IRQ_STS);
	writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
	writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */

	/* handle only relevant interrupts */
	irqsts &= ~IGNORED_IRQS;

	if (irqsts & CARD_DETECT_IRQ) {
		cf_card_detect(acdev, 1);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return IRQ_HANDLED;
	}

	if (irqsts & PIO_XFER_ERR_IRQ) {
		acdev->dma_status = ATA_DMA_ERR;
		writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
		       acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		complete(&acdev->cf_completion);
		dev_err(acdev->host->dev, "pio xfer err irq\n");
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (irqsts & BUF_AVAIL_IRQ) {
		complete(&acdev->cf_completion);
		return IRQ_HANDLED;
	}

	if (irqsts & XFER_DONE_IRQ) {
		struct ata_queued_cmd *qc = acdev->qc;

		/* Send Complete only for write */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			complete(&acdev->cf_completion);
	}

	return IRQ_HANDLED;
}

static void arasan_cf_freeze(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* stop transfer and reset controller */
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
	       acdev->vbase + XFER_CTR);
	cf_ctrl_reset(acdev);
	acdev->dma_status = ATA_DMA_ERR;

	ata_sff_dma_pause(ap);
	ata_sff_freeze(ap);
}

static void arasan_cf_error_handler(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/*
	 * DMA transfers using an external DMA controller may be scheduled.
	 * Abort them before handling error. Refer data_xfer() for further
	 * details.
	 */
	cancel_work_sync(&acdev->work);
	cancel_delayed_work_sync(&acdev->dwork);
	return ata_sff_error_handler(ap);
}

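/*
 * Kick off a DMA command: program the transfer direction into XFER_CTR,
 * issue the taskfile command to the device, then schedule the data_xfer()
 * worker that moves the data through the FIFO from process context.
 */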
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	struct ata_port *ap = qc->ap;
	struct ata_taskfile *tf = &qc->tf;
	u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
	u32 write = tf->flags & ATA_TFLAG_WRITE;

	xfer_ctr |= write ? XFER_WRITE : XFER_READ;
	writel(xfer_ctr, acdev->vbase + XFER_CTR);

	ap->ops->sff_exec_command(ap, tf);
	ata_sff_queue_work(&acdev->work);
}

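/*
 * qc_issue: PIO commands are handed straight to ata_sff_qc_issue(); only the
 * ATA_PROT_DMA protocol is handled here, by loading the taskfile and starting
 * the external-DMA pipeline above.
 */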
static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_wait_idle(ap);
	ata_sff_dev_select(ap, qc->dev->devno);
	ata_wait_idle(ap);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		acdev->dma_status = 0;
		acdev->qc = qc;
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		arasan_cf_dma_start(acdev);
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

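/*
 * set_piomode: the controller supports True IDE PIO modes 0-6. The UDMA/MWDMA
 * enables and DRQ block size bits are cleared in OP_MODE, the PIO timing
 * field in TM_CFG is programmed, and the interrupt mix is switched from
 * buf_avail/xfer_done to the PIO transfer error interrupt.
 */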
static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u8 pio = adev->pio_mode - XFER_PIO_0;
	unsigned long flags;
	u32 val;

	/* Arasan ctrl supports Mode0 -> Mode6 */
	if (pio > 6) {
		dev_err(ap->dev, "Unknown PIO mode\n");
		return;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	val = readl(acdev->vbase + OP_MODE) &
		~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
	writel(val, acdev->vbase + OP_MODE);

	val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
	val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
	writel(val, acdev->vbase + TM_CFG);

	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

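/*
 * set_dmamode: selects either UDMA (modes 0-6) or multiword DMA (modes 0-4)
 * in OP_MODE, programs the matching timing field in TM_CFG, puts XFER_CTR
 * into DMA transfer mode, and swaps the PIO error interrupt for the
 * buf_avail/xfer_done pair used by the DMA path.
 */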
static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u32 opmode, tmcfg, dma_mode = adev->dma_mode;
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	opmode = readl(acdev->vbase + OP_MODE) &
		~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
	tmcfg = readl(acdev->vbase + TM_CFG);

	if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
		opmode |= ULTRA_DMA_ENB;
		tmcfg &= ~ULTRA_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
	} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
		opmode |= MULTI_WORD_DMA_ENB;
		tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
			TRUEIDE_MWORD_DMA_TIMING_SHIFT;
	} else {
		dev_err(ap->dev, "Unknown DMA mode\n");
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return;
	}

	writel(opmode, acdev->vbase + OP_MODE);
	writel(tmcfg, acdev->vbase + TM_CFG);
	writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);

	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static struct ata_port_operations arasan_cf_ops = {
	.inherits = &ata_sff_port_ops,
	.freeze = arasan_cf_freeze,
	.error_handler = arasan_cf_error_handler,
	.qc_issue = arasan_cf_qc_issue,
	.set_piomode = arasan_cf_set_piomode,
	.set_dmamode = arasan_cf_set_dmamode,
};

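/*
 * Probe: map the register window, pick up quirks from platform data (UDMA is
 * assumed broken when none is supplied, matching spear1340), fall back to
 * PIO-only operation when no usable IRQ is available, and register a
 * single-port ata_host.
 */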
static int arasan_cf_probe(struct platform_device *pdev)
{
	struct arasan_cf_dev *acdev;
	struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct ata_host *host;
	struct ata_port *ap;
	struct resource *res;
	u32 quirk;
	irq_handler_t irq_handler = NULL;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
				DRIVER_NAME)) {
		dev_warn(&pdev->dev, "Failed to get memory region resource\n");
		return -ENOENT;
	}

	acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
	if (!acdev)
		return -ENOMEM;

	if (pdata)
		quirk = pdata->quirk;
	else
		quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */

	/*
	 * If there's an error getting IRQ (or we do get IRQ0),
	 * support only PIO
	 */
	ret = platform_get_irq(pdev, 0);
	if (ret > 0) {
		acdev->irq = ret;
		irq_handler = arasan_cf_interrupt;
	} else if (ret == -EPROBE_DEFER) {
		return ret;
	} else {
		quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
	}

	acdev->pbase = res->start;
	acdev->vbase = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));
	if (!acdev->vbase) {
		dev_warn(&pdev->dev, "ioremap fail\n");
		return -ENOMEM;
	}

	acdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(acdev->clk)) {
		dev_warn(&pdev->dev, "Clock not found\n");
		return PTR_ERR(acdev->clk);
	}

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host) {
		dev_warn(&pdev->dev, "alloc host fail\n");
		return -ENOMEM;
	}

	ap = host->ports[0];
	host->private_data = acdev;
	acdev->host = host;
	ap->ops = &arasan_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->mwdma_mask = ATA_MWDMA4;
	ap->udma_mask = ATA_UDMA6;

	init_completion(&acdev->cf_completion);
	init_completion(&acdev->dma_completion);
	INIT_WORK(&acdev->work, data_xfer);
	INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
	dma_cap_set(DMA_MEMCPY, acdev->mask);

	/* Handle platform specific quirks */
	if (quirk) {
		if (quirk & CF_BROKEN_PIO) {
			ap->ops->set_piomode = NULL;
			ap->pio_mask = 0;
		}
		if (quirk & CF_BROKEN_MWDMA)
			ap->mwdma_mask = 0;
		if (quirk & CF_BROKEN_UDMA)
			ap->udma_mask = 0;
	}
	ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;

	ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
	ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
	ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
	ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
	ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
	ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
	ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;

	ata_port_desc(ap, "phy_addr %llx virt_addr %p",
		      (unsigned long long)res->start, acdev->vbase);

	ret = cf_init(acdev);
	if (ret)
		return ret;

	cf_card_detect(acdev, 0);

	ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
				&arasan_cf_sht);
	if (!ret)
		return 0;

	cf_exit(acdev);

	return ret;
}

static int arasan_cf_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	ata_host_detach(host);
	cf_exit(acdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	if (acdev->dma_chan)
		dmaengine_terminate_all(acdev->dma_chan);

	cf_exit(acdev);
	ata_host_suspend(host, PMSG_SUSPEND);
	return 0;
}

static int arasan_cf_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	cf_init(acdev);
	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);

#ifdef CONFIG_OF
static const struct of_device_id arasan_cf_id_table[] = {
	{ .compatible = "arasan,cf-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
#endif

static struct platform_driver arasan_cf_driver = {
	.probe		= arasan_cf_probe,
	.remove		= arasan_cf_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &arasan_cf_pm_ops,
		.of_match_table = of_match_ptr(arasan_cf_id_table),
	},
};

module_platform_driver(arasan_cf_driver);

MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);