/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
	return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
	return slave ? slave->src_master : 1;
}

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		int _dms = dwc_get_dms(__slave);		\
		int _sms = dwc_get_sms(__slave);		\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
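
/*
 * Note on DWC_DEFAULT_CTLLO(): it only encodes the fields that are common
 * to every transfer on a channel (burst sizes, LLP update enables and the
 * AHB masters picked by dwc_get_dms()/dwc_get_sms()). The prep routines
 * below OR in the transfer width, address-increment and flow-control bits
 * themselves.
 */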

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
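
/*
 * The value returned above is used as a log2 transfer width: callers clamp
 * it against the AHB master data width and feed it to DWC_CTLL_SRC_WIDTH()/
 * DWC_CTLL_DST_WIDTH(). For example, an 8-byte-aligned address and length
 * allow 64-bit beats, while any odd byte forces 8-bit beats.
 */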

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/* Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer. */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer "
				"inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->tx_list = &first->tx_list;
		dwc->tx_node_active = first->tx_list.next;

		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i)) {
			unsigned long flags;

			spin_lock_irqsave(&dwc->lock, flags);
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
				if (dwc->tx_node_active != dwc->tx_list) {
					struct dw_desc *desc =
						list_entry(dwc->tx_node_active,
							   struct dw_desc,
							   desc_node);

					dma_writel(dw, CLEAR.XFER, dwc->mask);

					/* move pointer to next descriptor */
					dwc->tx_node_active =
						dwc->tx_node_active->next;

					dwc_do_single_block(dwc, desc);

					spin_unlock_irqrestore(&dwc->lock, flags);
					continue;
				} else {
					/* we are done here */
					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
				}
			}
			spin_unlock_irqrestore(&dwc->lock, flags);

			dwc_scan_descriptors(dw, dwc);
		}
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
			   dwc->dw->data_width[dwc_get_dms(dws)]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dwc->dw->data_width[dwc_get_sms(dws)];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dwc->dw->data_width[dwc_get_dms(dws)];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
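
/*
 * Example of the conversion above (a note, not driver code): a client
 * requesting a maxburst of 16 ends up with the register encoding
 * fls(16) - 2 = 3, which is what CTLx.SRC_MSIZE/DST_MSIZE expect.
 */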

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
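
/*
 * For reference, a slave client would normally reach the function above
 * through the generic dmaengine helper (a sketch only; the direction,
 * register address, width and burst values are made-up examples):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= per_fifo_phys,	(client FIFO address)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */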

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

		dwc_chan_disable(dw, dwc);

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma *dw = to_dw_dma(chan->device);
	static struct dw_dma *last_dw;
	static char *last_bus_id;
	int i = -1;

	/*
	 * The dmaengine framework calls this routine for all channels of all
	 * DMA controllers, until true is returned. If 'param' bus_id is not
	 * registered with a dma controller (dw), then there is no need to run
	 * the check below for every channel of that dw.
	 *
	 * This block of code does this by saving the parameters of the last
	 * failure. If dw and param are the same, i.e. trying the same dw with
	 * a different channel, return false.
	 */
	if ((last_dw == dw) && (last_bus_id == param))
		return false;

	/*
	 * Return true:
	 * - If dw_dma's platform data is not filled with slave info, then all
	 *   dma controllers are fine for transfer.
	 * - Or if param is NULL
	 */
	if (!dw->sd || !param)
		return true;

	while (++i < dw->sd_count) {
		if (!strcmp(dw->sd[i].bus_id, param)) {
			chan->private = &dw->sd[i];
			last_dw = NULL;
			last_bus_id = NULL;

			return true;
		}
	}

	last_dw = dw;
	last_bus_id = param;
	return false;
}
EXPORT_SYMBOL(dw_dma_generic_filter);
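
/*
 * Usage sketch for the filter above (not part of this driver): a client
 * that knows its slave bus_id would do something like
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_generic_filter, "uart0_tx");
 *
 * where "uart0_tx" stands in for whatever bus_id string was registered via
 * platform data or the slave_info device-tree node.
 */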

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
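
/*
 * Typical life cycle of the cyclic API above (a sketch; buf, buf_len and
 * period_len come from the client, e.g. an audio ring buffer, and
 * my_period_done/my_data are hypothetical client names):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_data;
 *
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */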

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *sn, *cn, *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	struct dw_dma_slave *sd;
	u32 tmp, arr[4];

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
		return NULL;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32(np, "nr_masters", &tmp)) {
		if (tmp > 4)
			return NULL;

		pdata->nr_masters = tmp;
	}

	if (!of_property_read_u32_array(np, "data_width", arr,
				pdata->nr_masters))
		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];

	/* parse slave data */
	sn = of_find_node_by_name(np, "slave_info");
	if (!sn)
		return pdata;

	/* calculate number of slaves */
	tmp = of_get_child_count(sn);
	if (!tmp)
		return NULL;

	sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
	if (!sd)
		return NULL;

	pdata->sd = sd;
	pdata->sd_count = tmp;

	for_each_child_of_node(sn, cn) {
		sd->dma_dev = &pdev->dev;
		of_property_read_string(cn, "bus_id", &sd->bus_id);
		of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
		of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
		if (!of_property_read_u32(cn, "src_master", &tmp))
			sd->src_master = tmp;

		if (!of_property_read_u32(cn, "dst_master", &tmp))
			sd->dst_master = tmp;
		sd++;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif
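
/*
 * For illustration, a device-tree node carrying the properties parsed
 * above could look like this (a sketch; the values and the slave child
 * node are made up, and the usual compatible/reg/interrupts properties
 * are omitted):
 *
 *	dmac {
 *		nr_channels = <8>;
 *		chan_allocation_order = <1>;
 *		chan_priority = <1>;
 *		block_size = <0xfff>;
 *		nr_masters = <2>;
 *		data_width = <3 3>;
 *
 *		slave_info {
 *			uart0_tx {
 *				bus_id = "uart0_tx";
 *				cfg_hi = <0x4000>;
 *				cfg_lo = <0>;
 *				src_master = <0>;
 *				dst_master = <1>;
 *			};
 *		};
 *	};
 */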
2012-11-19 22:22:55 +04:00
static int dw_probe ( struct platform_device * pdev )
2008-07-08 22:59:42 +04:00
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	void __iomem *regs;
	bool autocfg;
	unsigned int dw_params;
	unsigned int nr_channels;
	unsigned int max_blk_size = 0;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	regs = devm_request_and_ioremap(&pdev->dev, io);
	if (!regs)
		return -EBUSY;

	dw_params = dma_read_byaddr(regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

	dw->regs = regs;
	dw->sd = pdata->sd;
	dw->sd_count = pdata->sd_count;

	/* get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* force dma off, just in case */
	dw_dma_off(dw);

	/* disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
			       "dw_dmac", dw);
	if (err)
		return err;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];
		int r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->dw = dw;

		/* hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;
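
			/*
			 * Note: the per-channel DWC_PARAMS words appear to be
			 * laid out in reverse channel order in the register
			 * map, hence the use of r (not i) in the read below.
			 */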
			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
						     DWC_PARAMS);

			/* Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095. */
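			/* For example, an encoded value of 0 gives
			 * (4 << 0) - 1 = 3 and 0x0a gives
			 * (4 << 10) - 1 = 4095. */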
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
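			/* If the LLP register bits are hard-wired to zero the
			 * channel cannot fetch a linked list, i.e. it is
			 * limited to single block transfers. */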
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
		 nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;
}

static int __devexit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
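
/*
 * Note that resume only re-enables the controller; dw_dma_off() cleared each
 * channel's "initialized" flag before suspend, so per-channel configuration
 * is expected to be programmed again the next time a channel is used.
 */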

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif

static struct platform_driver dw_driver = {
	.remove = dw_remove,
	.shutdown = dw_shutdown,
	.driver = {
		.name = "dw_dmac",
		.pm = &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};
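
/*
 * The driver is registered with platform_driver_probe() since the controller
 * is not expected to appear or disappear at runtime, and at subsys_initcall
 * time so that DMA channels are already available when client drivers probe.
 */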

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");