/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the source address hold transfer size. The source address hold
 * or loop transfer size controls how the DMA reads from the source
 * address (SA): if the loop size is 4, the DMA will read data from SA,
 * SA + 1, SA + 2, SA + 3, then loop back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
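
/*
 * Worked example (illustrative only, not driver code): for a source loop
 * size of 8, __ilog2(8) == 3, so the bits ORed into the mode register are
 * FSL_DMA_MR_SAHE | (3 << 14). The size field of the source-address-hold
 * encoding occupies two bits starting at bit 14, which is consistent with
 * the supported sizes 1, 2, 4 and 8 handled by the switch above.
 */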

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the destination address hold transfer size. The destination address
 * hold or loop transfer size controls how the DMA writes to the destination
 * address (TA): if the loop size is 4, the DMA will write data to TA,
 * TA + 1, TA + 2, TA + 3, then loop back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
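
/*
 * Worked example (illustrative only, not driver code): a request count of
 * 64 bytes gives __ilog2(64) == 6, so (6 << 24) & 0x0f000000 == 0x06000000
 * is ORed into the mode register. The 0x0f000000 mask keeps the encoding
 * inside a four-bit field, which matches the documented 1024-byte maximum
 * (__ilog2(1024) == 10).
 */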

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately. The DMA channel will wait for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
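
/*
 * Illustrative note (not driver code): set_desc_next() above rewrites the
 * old tail's entire 64-bit next_ln_addr field, so the FSL_DMA_EOL bit that
 * set_ld_eol() previously left there disappears. After the splice, the
 * chain ends at the last link of the newly appended transaction, which
 * still carries its own EOL bit.
 */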

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
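
/*
 * Illustrative note (not driver code): the cookie sequence assigned above
 * stays within the positive range of dma_cookie_t. Once chan->common.cookie
 * reaches the maximum positive value, the next increment goes negative and
 * the "if (cookie < 0) cookie = 1" test restarts the sequence at 1, so
 * error values such as -EBUSY are never handed out as valid cookies.
 */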

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
						struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_dbg(chan->dev, "out of memory for link desc\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
					  chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate channel %d "
				   "descriptor pool\n", chan->id);
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
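
/*
 * Illustrative note (not driver code): a copy of len bytes becomes roughly
 * len / FSL_DMA_BCR_MAX_CNT (rounded up) link descriptors. Each pass of the
 * loop above programs min(len, FSL_DMA_BCR_MAX_CNT) bytes, advances dma_src
 * and dma_dst by the same amount, and chains the new link after the
 * previous one via set_desc_next(); only the final link gets the EOL bit
 * from set_ld_eol().
 */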

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!dchan)
		return NULL;

	if (!dchan->private)
		return NULL;

	chan = to_fsl_chan(dchan);
	slave = dchan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
			new = fsl_dma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "No free memory for "
						   "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
					     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(chan, &new->hw, copy);
			set_desc_src(chan, &new->hw, dma_src);
			set_desc_dst(chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	/* Enable extra controller features */
	if (chan->set_src_loop_size)
		chan->set_src_loop_size(chan, slave->src_loop_size);

	if (chan->set_dst_loop_size)
		chan->set_dst_loop_size(chan, slave->dst_loop_size);

	if (chan->toggle_ext_start)
		chan->toggle_ext_start(chan, slave->external_start);

	if (chan->toggle_ext_pause)
		chan->toggle_ext_pause(chan, slave->external_pause);

	if (chan->set_request_count)
		chan->set_request_count(chan, slave->request_count);

	return &first->async_tx;

fail:
	/*
	 * If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done
	 */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
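
/*
 * Sketch of client usage (hypothetical, not part of this driver): a slave
 * device driver builds a struct fsl_dma_slave whose slave->addresses list
 * holds one or more struct fsl_dma_hw_addr entries, attaches it to the
 * channel, and then calls the prep hook above:
 *
 *	chan->private = slave;
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						  DMA_TO_DEVICE, flags);
 *	if (desc)
 *		desc->tx_submit(desc);
 *
 * Any field or helper names beyond those referenced in this file are
 * assumptions.
 */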

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd)
{
	struct fsldma_chan *chan;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* Halt the DMA engine */
	dma_halt(chan);

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return 0;
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (list_empty(&chan->ld_running)) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
	else {
		cookie = desc->async_tx.cookie - 1;
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_running queue of the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			dev_dbg(chan->dev, "LD %p callback\n", desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		dev_dbg(chan->dev, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   dma_cookie_t *done,
					   dma_cookie_t *used)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);

	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);

	if (stat)
		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);

	dev_dbg(chan->dev, "irq: Exit\n");
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}
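
/*
 * Worked example (illustrative only, not driver code): the controller-level
 * handler above scans the global status register one byte per channel, most
 * significant byte first (mask starts at 0xff000000 for channel 0). A gsr
 * of 0x00490000 therefore means only channel 1 has pending status (0x49),
 * so fsldma_chan_irq() runs once, for channel 1, and the other channels are
 * skipped.
 */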

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}
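
/*
 * Illustrative note (not driver code): the chan->id computation above
 * assumes each channel's register block is 0x80 bytes long, with the first
 * block at offset 0x100 inside the controller. For example, a channel whose
 * registers start at an address with low bits 0x180 yields
 * ((0x180 - 0x100) & 0xfff) >> 7 == 1, i.e. channel 1.
 */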

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}
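
/*
 * Sketch of a matching device tree fragment (hypothetical; the offsets,
 * register sizes and interrupt specifiers are board-specific and only
 * meant to show which compatible strings this probe routine looks for):
 *
 *	dma@21300 {
 *		compatible = "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupts = <20 2>;
 *		};
 *	};
 */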

static int fsldma_of_remove(struct of_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.name		= "fsl-elo-dma",
	.match_table	= fsldma_of_ids,
	.probe		= fsldma_of_probe,
	.remove		= fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");