/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
	struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
	struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
	bool _is_slave = is_slave_direction(_dwc->direction);	\
	u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
	u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
	u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?	\
			_dwc->p_master : _dwc->m_master;	\
	u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?	\
			_dwc->p_master : _dwc->m_master;	\
								\
	(DWC_CTLL_DST_MSIZE(_dmsize)				\
	 | DWC_CTLL_SRC_MSIZE(_smsize)				\
	 | DWC_CTLL_LLP_D_EN					\
	 | DWC_CTLL_LLP_S_EN					\
	 | DWC_CTLL_DMS(_dms)					\
	 | DWC_CTLL_SMS(_sms));					\
	})
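
/*
 * Note: DWC_DEFAULT_CTLLO() takes the burst sizes from the channel's
 * dma_slave_config for slave transfers (falling back to DW_DMA_MSIZE_16 for
 * memory-to-memory), and routes the device-facing side of the transfer to the
 * channel's peripheral master (p_master) while the memory side uses m_master.
 */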

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;

	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}
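
/*
 * Note: dwc_initialize() programs CFG and unmasks the channel interrupts only
 * once per channel; DW_DMA_IS_INITIALIZED is cleared again when the channel is
 * freed or the controller is switched off, so a re-acquired channel gets
 * reprogrammed with its (possibly new) request line configuration.
 */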

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
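
/*
 * In the soft-LLP (nollp) case each hardware block is programmed one at a
 * time: the XFER interrupt for the previous block brings execution back into
 * the tasklet, which picks up tx_node_active and calls dwc_do_single_block()
 * again until the whole descriptor chain has been walked.
 */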

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u8 lms = DWC_LLP_LMS(dwc->m_master);
	unsigned long was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
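
/*
 * Worked example: if CTL_HI.BLOCK_TS reads 16 completed items and CTL_LO
 * bits [6:4] (the source transfer width) read 2, each item is 1 << 2 = 4
 * bytes, so dwc_get_sent() reports 16 * 4 = 64 bytes sent from source.
 */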

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
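
/*
 * In hardware LLP mode the scan above walks the active list comparing the
 * channel's current LLP register against each descriptor's physical address
 * and lli.llp pointer: descriptors the hardware has already moved past are
 * completed, the one currently in flight has its residue reduced by
 * dwc_get_sent(), and everything after it is left untouched.
 */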

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		unsigned int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	u8 m_master = dwc->m_master;
	unsigned int src_width;
	unsigned int dst_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	u32 ctllo;
	u8 lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, xfer_count);
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	u8 m_master = dwc->m_master;
	u8 lms = DWC_LLP_LMS(m_master);
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			lli_write(desc, ctlhi, dlen >> mem_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			lli_write(desc, ctlhi, dlen >> reg_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	dwc->src_id = dws->src_id;
	dwc->dst_id = dws->dst_id;

	dwc->m_master = dws->m_master;
	dwc->p_master = dws->p_master;

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
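
/*
 * A minimal usage sketch (illustrative only; the device pointer, request line
 * numbers and masters are hypothetical): a client fills a struct dw_dma_slave
 * and passes dw_dma_filter as the filter function when requesting a channel:
 *
 *	struct dw_dma_slave slave = {
 *		.dma_dev  = dma_controller_dev,
 *		.src_id   = 0,
 *		.dst_id   = 1,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 */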

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by the controller.
 *
 * This can be done by finding the most significant set bit: the values are
 * powers of two, so fls() yields the encoding directly.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
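
/*
 * Worked example: a requested maxburst of 8 becomes fls(8) - 2 = 4 - 2 = 2,
 * matching the 8 -> 2 mapping above; a maxburst of 1 (or 0) maps to 0.
 */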

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;
	unsigned int count = 20;	/* timeout iterations */
	u32 cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}
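
/*
 * Residue reporting in short: a descriptor that is still queued reports its
 * full total_len, the head of the active list reports the residue maintained
 * by dwc_scan_descriptors() (further reduced by dwc_get_sent() while a
 * soft-LLP block is in flight), and an unknown cookie reports zero.
 */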

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}

static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
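
/*
 * The controller itself is only kept enabled while at least one channel is
 * allocated: dwc_alloc_chan_resources() calls dw_dma_on() for the first user
 * and sets the channel bit in dw->in_use; dwc_free_chan_resources() clears it
 * and calls dw_dma_off() once the mask drops back to zero.
 */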

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	dwc->src_id = 0;
	dwc->dst_id = 0;

	dwc->m_master = 0;
	dwc->p_master = 0;

	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case this was the last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Enable interrupts to perform cyclic transfer */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);

	dwc_dostart(dwc, dwc->cdesc->desc[0]);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	u8 lms = DWC_LLP_LMS(dwc->m_master);
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			lli_write(desc, dar, sconfig->dst_addr);
			lli_write(desc, sar, buf_addr + period_len * i);
			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_INT_EN));

			lli_set(desc, ctllo, sconfig->device_fc ?
					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
					DWC_CTLL_FC(DW_DMA_FC_D_M2P));

			break;
		case DMA_DEV_TO_MEM:
			lli_write(desc, dar, buf_addr + period_len * i);
			lli_write(desc, sar, sconfig->src_addr);
			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_INT_EN));

			lli_set(desc, ctllo, sconfig->device_fc ?
					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
					DWC_CTLL_FC(DW_DMA_FC_D_P2M));

			break;
		default:
			break;
		}

		lli_write(desc, ctlhi, period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			lli_write(last, llp, desc->txd.phys | lms);

		last = desc;
	}

	/* Let's make a cyclic list */
	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	unsigned int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	dwc->cdesc = NULL;

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
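
/*
 * A rough usage sketch of the cyclic extension (illustrative only; channel
 * setup and error handling omitted, callback and data names are hypothetical):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_callback;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */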
2008-07-08 22:59:42 +04:00
/*----------------------------------------------------------------------*/
2016-04-27 14:15:40 +03:00
int dw_dma_probe ( struct dw_dma_chip * chip )
2012-10-16 08:19:17 +04:00
{
2016-04-27 14:15:40 +03:00
struct dw_dma_platform_data * pdata ;
2008-07-08 22:59:42 +04:00
struct dw_dma * dw ;
2015-10-13 20:09:17 +03:00
bool autocfg = false ;
2012-09-21 16:05:46 +04:00
unsigned int dw_params ;
2016-03-18 17:24:48 +03:00
unsigned int i ;
2008-07-08 22:59:42 +04:00
int err ;
2014-03-05 17:48:12 +04:00
dw = devm_kzalloc ( chip - > dev , sizeof ( * dw ) , GFP_KERNEL ) ;
if ( ! dw )
return - ENOMEM ;
2016-04-27 14:15:39 +03:00
dw - > pdata = devm_kzalloc ( chip - > dev , sizeof ( * dw - > pdata ) , GFP_KERNEL ) ;
if ( ! dw - > pdata )
return - ENOMEM ;
2014-03-05 17:48:12 +04:00
dw - > regs = chip - > regs ;
chip - > dw = dw ;
2014-11-05 19:34:48 +03:00
pm_runtime_get_sync ( chip - > dev ) ;
2016-04-27 14:15:40 +03:00

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
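		/*
		 * Note: the fields above are plain bit extractions, e.g. an
		 * NR_CHAN field of 7 means 8 channels and a DATA_WIDTH field
		 * of 1 means an 8-byte (64-bit) wide data bus for that
		 * master.
		 */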

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->is_memcpy = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  "dw_dmac", dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = dma_readl_native(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
				dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
			dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
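			/*
			 * If LLP.LOC reads back as zero, the channel was
			 * synthesized without a linked-list pointer, so only
			 * single-block transfers are possible on it.
			 */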
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	if (pdata->is_memcpy)
		dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
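	/*
	 * DMA_PRIVATE keeps these channels out of the generic channel
	 * allocator so that only explicitly requested (slave) users get
	 * them; DMA_MEMCPY additionally advertises memory-to-memory copies.
	 */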

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
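
/*
 * Illustrative sketch, not part of this driver: how a bus glue driver is
 * expected to hand a controller instance to dw_dma_probe(). The wrapper
 * name and its arguments are assumptions made up for the example; the real
 * users are the platform and PCI glue drivers. Leaving chip->pdata NULL
 * requests hardware auto-configuration via DW_PARAMS, as seen above.
 */
static int __maybe_unused dw_dma_example_glue_probe(struct device *dev,
						    void __iomem *regs,
						    int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->regs = regs;
	chip->irq = irq;
	chip->pdata = NULL;	/* let dw_dma_probe() read DW_PARAMS */

	err = dw_dma_probe(chip);
	if (err)
		return err;

	/* Keep @chip around; the glue must call dw_dma_remove() on teardown */
	dev_set_drvdata(dev, chip);
	return 0;
}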

int dw_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);

int dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_disable);

int dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_enable);
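
/*
 * Illustrative sketch, not part of this driver: dw_dma_disable() and
 * dw_dma_enable() are meant to be wired into the glue driver's system
 * sleep hooks. The hook names and the drvdata layout are assumptions
 * carried over from the probe sketch above.
 */
static int __maybe_unused dw_dma_example_suspend(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	/* Quiesce the controller before the system goes to sleep */
	return dw_dma_disable(chip);
}

static int __maybe_unused dw_dma_example_resume(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	/* Re-enable DMA once the system is back up */
	return dw_dma_enable(chip);
}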

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");