/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

enum {
	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}
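
/*
 * Clear the channel status: on OMAP1 the CSR register is clear-on-read,
 * while on OMAP2+ the status bits are cleared by writing ones back.
 */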
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	/* Enable channel */
	c->plat->dma_write(d->ccr | CCR_ENABLE, CCR, c->dma_ch);
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	omap_dma_clear_csr(c);

	val = c->plat->dma_read(CCR, c->dma_ch);
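
	/*
	 * Erratum workaround (see DMA_ERRATA_i541): a source-triggered
	 * channel may fail to drain its FIFO when disabled. Force the
	 * module to no-idle while clearing CCR_ENABLE, then poll until
	 * both the read and write sides report inactive.
	 */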
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~CCR_ENABLE;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~CCR_ENABLE;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}
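
	/*
	 * The scatterlist describes the memory side of the transfer, so
	 * sg->addr lands in the destination registers for DMA_DEV_TO_MEM
	 * and in the source registers otherwise.
	 */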
	c->plat->dma_write(sg->addr, cxsa, c->dma_ch);
	c->plat->dma_write(0, cxei, c->dma_ch);
	c->plat->dma_write(0, cxfi, c->dma_ch);
	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	c->plat->dma_write(d->ccr, CCR, c->dma_ch);
	if (dma_omap1())
		c->plat->dma_write(d->ccr >> 16, CCR2, c->dma_ch);

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	c->plat->dma_write(d->dev_addr, cxsa, c->dma_ch);
	c->plat->dma_write(0, cxei, c->dma_ch);
	c->plat->dma_write(d->fi, cxfi, c->dma_ch);
	c->plat->dma_write(d->csdp, CSDP, c->dma_ch);
	c->plat->dma_write(d->clnk_ctrl, CLNK_CTRL, c->dma_ch);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels. We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'.
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
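
/*
 * Remaining descriptor size measured from a given bus address: entries
 * before the one containing addr contribute nothing, the containing
 * entry contributes its bytes from addr onwards, and every later entry
 * contributes its full size.
 */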
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr)) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (c->plat->dma_read(CDAC, c->dma_ch))
			addr = c->plat->dma_read(CSAC, c->dma_ch);
		else
			addr = c->plat->dma_read(CSSA, c->dma_ch);
	}
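
	/*
	 * OMAP1 DMA registers are only 16 bits wide, so the progress
	 * counter carries just the low half of the address; take the
	 * upper half from the programmed start address.
	 */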
	if (dma_omap1())
		addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CDAC, c->dma_ch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
		addr = c->plat->dma_read(CDAC, c->dma_ch);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (addr == 0)
			addr = c->plat->dma_read(CDSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
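		/*
		 * The request line number is split across CCR: the low five
		 * bits select the synchro-control field, and the remaining
		 * bits land in the extended field at bits 20:19, hence the
		 * << 14 of the masked upper bits.
		 */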
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}

	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		d->ccr |= CCR_BUFFERING_DISABLE;

	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
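	/*
	 * Worked example: with 4-byte elements (CSDP_DATA_TYPE_32) and a
	 * maxburst of 16, en = 16 and frame_bytes = 64, so a 4096-byte sg
	 * entry is programmed as fn = 64 frames of 16 elements each.
	 */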
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;
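
	/*
	 * A cyclic transfer is described by a single sg entry: en is the
	 * number of elements per period and fn the number of periods in
	 * the buffer. For example, 2-byte samples with period_len = 512
	 * and buf_len = 2048 give en = 256 and fn = 4.
	 */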
	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = 0;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		d->ccr |= CCR_BUFFERING_DISABLE;

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};
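
/*
 * Filter function for dma_request_channel(). A minimal, illustrative
 * sketch of client usage (the names here are hypothetical):
 *
 *	unsigned sig = MY_DMA_REQUEST_LINE;
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */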
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");