/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16 MB.
 *
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18	/* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}
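
/*
 * Translate the transfer-size (TS) field of a CHCR value into the log2
 * transfer size, using the per-SoC ts_shift[] table from platform data.
 */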
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
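
/*
 * Inverse of calc_xmit_shift(): encode a log2 transfer size into the CHCR
 * TS bit fields, falling back to index 0 if the size is not supported.
 */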
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
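
/*
 * Program one hardware descriptor into the channel registers.  TCR is
 * written in units of the configured transfer size, hence the shift.
 */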
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
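
/*
 * Start the transfer on a channel: prime TEND on controllers that need it
 * (USB-DMAC), then set DE and the interrupt-enable bit while clearing TE.
 */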
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 */
	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a slave
 * ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}
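
/* Stop the channel: clear DE, TE and the interrupt-enable bit in CHCR. */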
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
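
/*
 * Bytes already transferred by the in-flight descriptor: the remaining
 * count in TCR (in transfer-size units) is converted back to bytes and
 * subtracted from the programmed length.
 */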
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif
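
/*
 * A descriptor is complete once the channel's current address register has
 * reached the end of the descriptor's range: DAR for device-to-memory
 * transfers, SAR otherwise.
 */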
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan)
		return -ENOMEM;

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

#ifdef CONFIG_PM
static int sh_dmae_runtime_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#endif

#ifdef CONFIG_PM_SLEEP
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
			   NULL)
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_device_get_match_data(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev)
		return -ENOMEM;

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	dma_dev->src_addr_widths = widths;
	dma_dev->dst_addr_widths = widths;
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 32 bytes requires 32-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);