/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

/* NOTE: DMS+SMS is system-specific. We should get this information
 * from the platform code somehow.
 */
#define DWC_DEFAULT_CTLLO	(DWC_CTLL_DST_MSIZE(0)		\
				| DWC_CTLL_SRC_MSIZE(0)		\
				| DWC_CTLL_DMS(0)		\
				| DWC_CTLL_SMS(1)		\
				| DWC_CTLL_LLP_D_EN		\
				| DWC_CTLL_LLP_S_EN)
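
/*
 * (DMS/SMS above select which AHB master interface the destination and
 * source of a transfer use; that wiring is likewise system-specific.)
 */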

/*
 * This is configuration-dependent and usually a funny size like 4095.
 * Let's round it down to the nearest power of two.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 8192 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	2048U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);
	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);
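
	/*
	 * Slave channels (chan->private set) point at peripheral FIFO
	 * registers and client-mapped buffers, so unmapping is left to
	 * the client; for memcpy transfers we honour the usual unmap
	 * flags ourselves below.
	 */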
	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&dwc->queue))
		dwc_dostart(dwc, dwc_first_queued(dwc));
	list_splice_init(&dwc->active_list, &list);
	list_splice_init(&dwc->queue, &dwc->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		dwc_dostart(dwc, dwc_first_queued(dwc));
		list_splice_init(&dwc->queue, &dwc->active_list);
	}
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_splice_init(&dwc->queue, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;
		if (callback) {
			spin_unlock(&dwc->lock);
			callback(callback_param);
			spin_lock(&dwc->lock);
		}
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
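		/* (the controller supports at most 8 channels, hence 8-bit masks) */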
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		dwc_dostart(dwc, desc);
		list_add_tail(&desc->desc_node, &dwc->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;
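
	/*
	 * These widths are log2 of the access size in bytes:
	 * 0 = byte, 1 = halfword, 2 = word.
	 */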
	ctllo = DWC_DEFAULT_CTLLO
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;
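
	/*
	 * Split the copy into chunks of at most DWC_MAX_COUNT items;
	 * ctlhi holds each block's length as a count of src_width-sized
	 * items, while offset advances in bytes.
	 */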
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len;
			u32 mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;
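			/* word-wide memory accesses unless buffer or length is unaligned */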

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);
		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len;
			u32 mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static void dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static enum dma_status
dwc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_dma_slave *dws;
	int i;
	u32 cfghi;
	u32 cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo;
	}
	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */
	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}
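
		/* tx_list chains the child blocks of a multi-block transfer */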
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock(&dwc->lock);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock(&dwc->lock);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock(&dwc->lock);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	spin_lock(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock(&dwc->lock);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	struct dw_dma_slave *dws = chan->private;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_bh(&dwc->lock);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_bh(&dwc->lock);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC_M2P
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC_P2M
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
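
/*
 * Illustrative usage sketch (not part of the driver): a client that
 * already owns a channel with a struct dw_dma_slave in chan->private
 * could drive the cyclic API along these lines, with soft interrupts
 * disabled around start/stop as documented above:
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len,
 *				   period_len, DMA_TO_DEVICE);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_data;
 *	ret = dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *
 * (my_period_done/my_data are placeholders for the client's callback.)
 */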

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	memset(dw, 0, sizeof *dw);

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		list_add_tail(&dwc->chan.device_node, &dw->dma.channels);

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_terminate_all = dwc_terminate_all;
	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
module_init(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");