2018-05-22 05:53:32 +03:00
// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
2013-01-21 14:09:00 +04:00
# include <linux/err.h>
2010-10-06 12:25:55 +04:00
# include <linux/init.h>
# include <linux/types.h>
# include <linux/mm.h>
# include <linux/interrupt.h>
# include <linux/spinlock.h>
# include <linux/device.h>
# include <linux/dma-mapping.h>
# include <linux/slab.h>
# include <linux/platform_device.h>
2012-03-22 17:54:01 +04:00
# include <linux/clk.h>
2010-10-06 12:25:55 +04:00
# include <linux/dmaengine.h>
2011-08-01 00:14:17 +04:00
# include <linux/module.h>
2013-05-26 13:53:20 +04:00
# include <linux/of_device.h>
# include <linux/of_dma.h>
2010-10-06 12:25:55 +04:00
# include <asm/irq.h>
2012-08-24 17:14:29 +04:00
# include <linux/platform_data/dma-imx.h>
2010-10-06 12:25:55 +04:00
2012-03-07 02:34:26 +04:00
# include "dmaengine.h"
2012-03-02 12:28:47 +04:00
# define IMXDMA_MAX_CHAN_DESCRIPTORS 16
2012-03-22 17:54:01 +04:00
# define IMX_DMA_CHANNELS 16
2012-03-22 17:54:14 +04:00
# define IMX_DMA_2D_SLOTS 2
# define IMX_DMA_2D_SLOT_A 0
# define IMX_DMA_2D_SLOT_B 1
2012-03-22 17:54:01 +04:00
# define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
# define IMX_DMA_MEMSIZE_32 (0 << 4)
# define IMX_DMA_MEMSIZE_8 (1 << 4)
# define IMX_DMA_MEMSIZE_16 (2 << 4)
# define IMX_DMA_TYPE_LINEAR (0 << 10)
# define IMX_DMA_TYPE_2D (1 << 10)
# define IMX_DMA_TYPE_FIFO (2 << 10)
# define IMX_DMA_ERR_BURST (1 << 0)
# define IMX_DMA_ERR_REQUEST (1 << 1)
# define IMX_DMA_ERR_TRANSFER (1 << 2)
# define IMX_DMA_ERR_BUFFER (1 << 3)
# define IMX_DMA_ERR_TIMEOUT (1 << 4)
# define DMA_DCR 0x00 /* Control Register */
# define DMA_DISR 0x04 /* Interrupt status Register */
# define DMA_DIMR 0x08 /* Interrupt mask Register */
# define DMA_DBTOSR 0x0c /* Burst timeout status Register */
# define DMA_DRTOSR 0x10 /* Request timeout Register */
# define DMA_DSESR 0x14 /* Transfer Error Status Register */
# define DMA_DBOSR 0x18 /* Buffer overflow status Register */
# define DMA_DBTOCR 0x1c /* Burst timeout control Register */
# define DMA_WSRA 0x40 /* W-Size Register A */
# define DMA_XSRA 0x44 /* X-Size Register A */
# define DMA_YSRA 0x48 /* Y-Size Register A */
# define DMA_WSRB 0x4c /* W-Size Register B */
# define DMA_XSRB 0x50 /* X-Size Register B */
# define DMA_YSRB 0x54 /* Y-Size Register B */
# define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
# define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
# define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
# define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
# define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
# define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
# define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
# define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
# define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
# define DCR_DRST (1<<1)
# define DCR_DEN (1<<0)
# define DBTOCR_EN (1<<15)
# define DBTOCR_CNT(x) ((x) & 0x7fff)
# define CNTR_CNT(x) ((x) & 0xffffff)
# define CCR_ACRPT (1<<14)
# define CCR_DMOD_LINEAR (0x0 << 12)
# define CCR_DMOD_2D (0x1 << 12)
# define CCR_DMOD_FIFO (0x2 << 12)
# define CCR_DMOD_EOBFIFO (0x3 << 12)
# define CCR_SMOD_LINEAR (0x0 << 10)
# define CCR_SMOD_2D (0x1 << 10)
# define CCR_SMOD_FIFO (0x2 << 10)
# define CCR_SMOD_EOBFIFO (0x3 << 10)
# define CCR_MDIR_DEC (1<<9)
# define CCR_MSEL_B (1<<8)
# define CCR_DSIZ_32 (0x0 << 6)
# define CCR_DSIZ_8 (0x1 << 6)
# define CCR_DSIZ_16 (0x2 << 6)
# define CCR_SSIZ_32 (0x0 << 4)
# define CCR_SSIZ_8 (0x1 << 4)
# define CCR_SSIZ_16 (0x2 << 4)
# define CCR_REN (1<<3)
# define CCR_RPT (1<<2)
# define CCR_FRC (1<<1)
# define CCR_CEN (1<<0)
# define RTOR_EN (1<<15)
# define RTOR_CLK (1<<14)
# define RTOR_PSC (1<<13)
2012-03-02 12:28:47 +04:00
enum imxdma_prep_type {
IMXDMA_DESC_MEMCPY ,
IMXDMA_DESC_INTERLEAVED ,
IMXDMA_DESC_SLAVE_SG ,
IMXDMA_DESC_CYCLIC ,
} ;
2012-03-22 17:54:14 +04:00
/*
 * Software shadow of one hardware 2D configuration slot (the X/Y/W size
 * register set A or B).  @count is the number of in-flight descriptors
 * currently sharing this slot's geometry.
 */
struct imx_dma_2d_config {
	u16	xsr;	/* X size */
	u16	ysr;	/* Y size */
	u16	wsr;	/* W (line width) size */
	int	count;	/* reference count of users of this slot */
};
2012-03-02 12:28:47 +04:00
/*
 * One prepared transfer on a channel.  Descriptors migrate between the
 * channel's ld_free, ld_queue and ld_active lists via @node.
 */
struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
2010-10-06 12:25:55 +04:00
/* Per-channel state for one of the IMX_DMA_CHANNELS hardware channels. */
struct imxdma_channel {
	/* Non-zero when hardware descriptor chaining is in use; only
	 * honoured on i.MX27 (see imxdma_hw_chain()). */
	int				hw_chaining;
	/* Fires if a chained transfer stalls; see imxdma_watchdog(). */
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	/* Descriptor lists, protected by imxdma->lock. */
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;	/* peripheral FIFO address */
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;	/* hardware request line */
	struct scatterlist		*sg_list;
	/* Pre-computed CCR values for each slave direction. */
	u32				ccr_from_device;
	u32				ccr_to_device;
	/* Set while this channel owns a 2D slot (interleaved transfer). */
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	/* Slave config cached by imxdma_config(), applied later by
	 * imxdma_config_write(). */
	struct dma_slave_config		config;
};
2012-09-15 17:11:28 +04:00
enum imx_dma_type {
IMX1_DMA ,
IMX21_DMA ,
IMX27_DMA ,
} ;
2010-10-06 12:25:55 +04:00
/* Driver-wide state: register base, clocks, 2D slots and all channels. */
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	/* Protects the descriptor lists and the 2D slot table. */
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};
2013-05-26 13:53:20 +04:00
struct imxdma_filter_data {
struct imxdma_engine * imxdma ;
int request ;
} ;
2015-05-01 18:57:46 +03:00
/* Platform-bus id table; driver_data carries the enum imx_dma_type. */
static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
2013-05-26 13:53:20 +04:00
/* OF match table; .data points into imx_dma_devtype so both probe paths
 * resolve to the same driver_data. */
static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
2012-09-15 17:11:28 +04:00
/* True on the i.MX1 variant (which, per dma_irq_handler(), has no
 * separate error interrupt handling path). */
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}
/* True on the i.MX27 variant (the only one allowed to use HW chaining). */
static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}
2010-10-06 12:25:55 +04:00
/* Convert a generic dma_chan back to its embedding imxdma_channel. */
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
2012-03-02 12:28:47 +04:00
static inline bool imxdma_chan_is_doing_cyclic ( struct imxdma_channel * imxdmac )
2010-10-06 12:25:55 +04:00
{
2012-03-02 12:28:47 +04:00
struct imxdma_desc * desc ;
if ( ! list_empty ( & imxdmac - > ld_active ) ) {
desc = list_first_entry ( & imxdmac - > ld_active , struct imxdma_desc ,
node ) ;
if ( desc - > type = = IMXDMA_DESC_CYCLIC )
return true ;
}
return false ;
2010-10-06 12:25:55 +04:00
}
2012-03-22 17:54:01 +04:00
2012-03-22 17:54:12 +04:00
/* Write @val to the engine register at @offset from the mapped base. */
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}
2012-03-22 17:54:12 +04:00
/* Read the engine register at @offset from the mapped base. */
static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
2010-10-06 12:25:55 +04:00
2012-03-22 17:54:10 +04:00
static int imxdma_hw_chain ( struct imxdma_channel * imxdmac )
2012-03-22 17:54:01 +04:00
{
2012-09-15 17:11:28 +04:00
struct imxdma_engine * imxdma = imxdmac - > imxdma ;
if ( is_imx27_dma ( imxdma ) )
2012-03-22 17:54:10 +04:00
return imxdmac - > hw_chaining ;
2012-03-22 17:54:01 +04:00
else
return 0 ;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 *
 * Programs the current scatterlist entry of @d into the channel's
 * address and count registers.  The chunk size is capped at the
 * remaining d->len, which is decremented accordingly — except for
 * endless cyclic transfers (len == IMX_DMA_LENGTH_LOOP), which never
 * shrink.
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	/* DEV_TO_MEM writes into memory, so the sg address is the
	 * destination; otherwise it is the source. */
	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, "%s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}
2012-03-22 17:54:03 +04:00
/*
 * Start (or resume) the transfer described by @d on its channel: ack any
 * pending interrupt, unmask the channel in DIMR and set CCR_CEN.  When
 * hardware chaining is usable and further sg entries remain, the next
 * chunk is pre-programmed and CCR_RPT|CCR_ACRPT set so the controller
 * restarts on its own.  Runs with local IRQs off to keep the register
 * sequence atomic on this CPU.
 */
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	/* Clear stale interrupt, unmask the channel, then enable it. */
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;

			/* Pre-load the following chunk for auto-restart. */
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}
	local_irq_restore(flags);
}
/*
 * Stop the channel: cancel the chaining watchdog (if armed), mask the
 * channel interrupt, clear CCR_CEN and ack any pending interrupt.
 */
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
2017-10-24 13:02:23 +03:00
/*
 * Watchdog for hardware-chained transfers: if the controller does not
 * complete a chunk in time, force the channel off (CCR := 0) and let
 * the tasklet clean up the descriptor lists.
 */
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
2012-03-22 17:54:01 +04:00
/*
 * Error interrupt handler.  Gathers the per-cause status registers
 * (burst timeout, request timeout, transfer error, buffer overflow)
 * into one channel mask, acks each cause per channel, and schedules the
 * affected channels' tasklets for cleanup.  Always reports IRQ_HANDLED,
 * even when no error bit was set.
 */
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	/* Only ack interrupts for channels that actually erred. */
	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		/* Read-then-ack each error cause for this channel. */
		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
2012-03-22 17:54:01 +04:00
/*
 * Per-channel completion handling, called from the IRQ handler.
 * Advances the active descriptor's scatterlist: if more chunks remain,
 * the next one is programmed and the channel restarted (via HW chaining
 * with a watchdog, or by toggling CCR_CEN by hand); once the list is
 * exhausted the channel is stopped and the tasklet is scheduled to
 * complete the descriptor.
 *
 * NOTE(review): the list lock is dropped before @desc is used — this
 * relies on nobody freeing the active descriptor concurrently; confirm
 * against terminate_all ordering.
 */
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			/* More chunks: program the next one and restart. */
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				/* Stop, then re-enable to restart cleanly. */
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			/* Chained transfer finished on its own. */
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	/* Transfer done: stop the channel and let the tasklet complete it. */
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
2012-03-22 17:54:01 +04:00
/*
 * Main DMA interrupt handler.  On non-i.MX1 parts the error causes are
 * processed first (same IRQ line), then DISR is read, acked, and each
 * signalled channel is advanced via dma_irq_handle_channel().
 */
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
2012-03-02 12:28:47 +04:00
/*
 * Program the hardware for descriptor @d and start it.
 *
 * INTERLEAVED transfers first claim one of the two 2D slots (a slot can
 * be shared when its X/Y/W geometry matches) and then fall through to
 * the MEMCPY setup.  SLAVE_SG and CYCLIC program the peripheral address
 * plus the direction-specific CCR and load the first sg chunk.
 *
 * Returns 0 on success, -EBUSY when no 2D slot is free, -EINVAL for a
 * bad direction or descriptor type.
 */
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		/* Route this channel to slot A or B via CCR_MSEL_B and
		 * program the slot's geometry registers. */
		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		/* Fall through */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		/* Load the first sg chunk into the channel registers. */
		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
2012-03-02 12:28:47 +04:00
/*
 * Completion tasklet.  For a finished (non-cyclic) descriptor: complete
 * its cookie, release any 2D slot it held, recycle it onto ld_free, and
 * kick the next queued descriptor.  Cyclic descriptors stay on
 * ld_active forever; only their callback fires.  The client callback is
 * invoked after the lock is dropped.
 */
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	/* Callback runs unlocked; desc may already sit on ld_free here. */
	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}
2014-11-17 16:42:16 +03:00
/*
 * dmaengine terminate_all hook: stop the hardware channel, then move
 * every active and queued descriptor back onto the free list under the
 * engine lock.  Always succeeds.
 */
static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}
2012-10-30 19:58:50 +04:00
2018-07-19 19:52:26 +03:00
/*
 * Apply a slave configuration to the hardware for the given @direction:
 * pick the peripheral address/width/burst from the matching half of
 * @dmaengine_cfg, derive the per-direction CCR templates, select the
 * request line and program the burst length register.  Returns 0.
 */
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
		/* 32-bit access is also the fallback for unknown widths. */
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	/* Memory side is always 32-bit linear; FIFO side uses @mode.
	 * The destination half of CCR sits two bits above the source. */
	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}
2018-07-19 19:52:26 +03:00
static int imxdma_config ( struct dma_chan * chan ,
struct dma_slave_config * dmaengine_cfg )
{
struct imxdma_channel * imxdmac = to_imxdma_chan ( chan ) ;
memcpy ( & imxdmac - > config , dmaengine_cfg , sizeof ( * dmaengine_cfg ) ) ;
return 0 ;
}
2010-10-06 12:25:55 +04:00
/*
 * dmaengine .device_tx_status callback. Cookie bookkeeping is handled
 * entirely by the dmaengine core; no residue reporting is done here.
 */
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	enum dma_status status = dma_cookie_status(chan, cookie, txstate);

	return status;
}
static dma_cookie_t imxdma_tx_submit ( struct dma_async_tx_descriptor * tx )
{
struct imxdma_channel * imxdmac = to_imxdma_chan ( tx - > chan ) ;
2012-03-22 17:54:14 +04:00
struct imxdma_engine * imxdma = imxdmac - > imxdma ;
2010-10-06 12:25:55 +04:00
dma_cookie_t cookie ;
2012-03-02 12:28:47 +04:00
unsigned long flags ;
2010-10-06 12:25:55 +04:00
2012-03-22 17:54:14 +04:00
spin_lock_irqsave ( & imxdma - > lock , flags ) ;
2012-03-22 17:54:15 +04:00
list_move_tail ( imxdmac - > ld_free . next , & imxdmac - > ld_queue ) ;
2012-03-07 02:34:46 +04:00
cookie = dma_cookie_assign ( tx ) ;
2012-03-22 17:54:14 +04:00
spin_unlock_irqrestore ( & imxdma - > lock , flags ) ;
2010-10-06 12:25:55 +04:00
return cookie ;
}
/*
 * Pre-allocate a fixed pool of software descriptors for this channel.
 *
 * Returns the number of descriptors allocated, or -ENOMEM when not a
 * single one could be allocated.
 */
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	/* A filter function may have passed the HW request line in here. */
	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		/* NOTE(review): desc comes from kzalloc(), so this memset
		 * looks redundant — confirm before removing. */
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
/*
 * Stop the channel and release everything allocated by
 * imxdma_alloc_chan_resources(): the descriptor pool and the cyclic
 * scatterlist, if one was created.
 */
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	/* Collect active and queued descriptors back onto the free list
	 * so the single loop below can free them all. */
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	/* _safe variant: each node is freed while walking the list. */
	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	/* Drop the scatterlist used by cyclic transfers (kfree(NULL) is ok). */
	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}
/*
 * Prepare a device<->memory scatter/gather transfer.
 *
 * Returns NULL when no free descriptor is available, when the channel is
 * busy with a cyclic transfer, or when the configured bus width's
 * alignment requirements are not met.
 */
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	/* A cyclic transfer owns the channel exclusively. */
	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	/* Total transfer length across all sg entries. */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	/*
	 * Alignment check against the configured bus width.
	 * NOTE(review): only the first sg entry is checked — presumably the
	 * caller guarantees the remaining entries; confirm if stricter
	 * validation is required.
	 */
	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	/* The peripheral-side address was recorded by the slave config. */
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
/*
 * Prepare a cyclic (ring) transfer, e.g. for audio. The buffer is split
 * into buf_len/period_len periods, one scatterlist entry per period, and
 * the list is chained back onto itself so the hardware loops forever
 * (signalled by desc->len = IMX_DMA_LENGTH_LOOP).
 */
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	/* Replace any scatterlist left over from a previous cyclic prep. */
	kfree(imxdmac->sg_list);

	/* One extra entry serves as the chain link back to the start;
	 * GFP_ATOMIC because prep callbacks may run in atomic context. */
	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	/* The peripheral-side address was recorded by the slave config. */
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	/* Apply the cached slave config for this direction to the HW now. */
	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}
2012-02-28 20:08:17 +04:00
/*
 * Prepare a linear memory-to-memory copy of @len bytes from @src to @dest.
 * Returns NULL when no free descriptor is available or the channel is
 * busy with a cyclic transfer.
 */
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	/* Both sides are plain linear memory accessed 32 bits at a time. */
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
2012-03-22 17:54:14 +04:00
static struct dma_async_tx_descriptor * imxdma_prep_dma_interleaved (
struct dma_chan * chan , struct dma_interleaved_template * xt ,
unsigned long flags )
{
struct imxdma_channel * imxdmac = to_imxdma_chan ( chan ) ;
struct imxdma_engine * imxdma = imxdmac - > imxdma ;
struct imxdma_desc * desc ;
dmaengine: imx-dma: fix format warnings
drivers/dma/imx-dma.c:575:3: warning: format '%x' expects argument of type 'unsigned int', but argument 6 has type 'dma_addr_t'
drivers/dma/imx-dma.c:575:3: warning: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'dma_addr_t'
drivers/dma/imx-dma.c:589:4: warning: format '%x' expects argument of type 'unsigned int', but argument 9 has type 'dma_addr_t'
drivers/dma/imx-dma.c:599:4: warning: format '%x' expects argument of type 'unsigned int', but argument 9 has type 'dma_addr_t'
drivers/dma/imx-dma.c:929:2: warning: format '%x' expects argument of type 'unsigned int', but argument 6 has type 'dma_addr_t'
drivers/dma/imx-dma.c:929:2: warning: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'dma_addr_t'
drivers/dma/imx-dma.c:959:2: warning: format '%x' expects argument of type 'unsigned int', but argument 6 has type 'dma_addr_t'
drivers/dma/imx-dma.c:959:2: warning: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'dma_addr_t'
We can't use the %pa format for these because this relates to phys_addr_t,
and dma_addr_t can be a different size. So, fix these by converting them
to %llx and casting the dma_addr_t to always be unsigned long long.
While we're here, also use %zu for size_t.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
2013-10-31 04:40:30 +04:00
dev_dbg ( imxdma - > dev , " %s channel: %d src_start=0x%llx dst_start=0x%llx \n "
" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu \n " , __func__ ,
imxdmac - > channel , ( unsigned long long ) xt - > src_start ,
( unsigned long long ) xt - > dst_start ,
2012-03-22 17:54:14 +04:00
xt - > src_sgl ? " true " : " false " , xt - > dst_sgl ? " true " : " false " ,
xt - > numf , xt - > frame_size ) ;
if ( list_empty ( & imxdmac - > ld_free ) | |
imxdma_chan_is_doing_cyclic ( imxdmac ) )
return NULL ;
if ( xt - > frame_size ! = 1 | | xt - > numf < = 0 | | xt - > dir ! = DMA_MEM_TO_MEM )
return NULL ;
desc = list_first_entry ( & imxdmac - > ld_free , struct imxdma_desc , node ) ;
desc - > type = IMXDMA_DESC_INTERLEAVED ;
desc - > src = xt - > src_start ;
desc - > dest = xt - > dst_start ;
desc - > x = xt - > sgl [ 0 ] . size ;
desc - > y = xt - > numf ;
desc - > w = xt - > sgl [ 0 ] . icg + desc - > x ;
desc - > len = desc - > x * desc - > y ;
desc - > direction = DMA_MEM_TO_MEM ;
desc - > config_port = IMX_DMA_MEMSIZE_32 ;
desc - > config_mem = IMX_DMA_MEMSIZE_32 ;
if ( xt - > src_sgl )
desc - > config_mem | = IMX_DMA_TYPE_2D ;
if ( xt - > dst_sgl )
desc - > config_port | = IMX_DMA_TYPE_2D ;
desc - > desc . callback = NULL ;
desc - > desc . callback_param = NULL ;
return & desc - > desc ;
2010-10-06 12:25:55 +04:00
}
/*
 * dmaengine .device_issue_pending callback: if the channel is idle and
 * a descriptor is queued, start the first queued descriptor on the
 * hardware and move it to the active list.
 */
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			/* Leave it queued; a later issue_pending may succeed. */
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}
2013-05-26 13:53:20 +04:00
static bool imxdma_filter_fn ( struct dma_chan * chan , void * param )
{
struct imxdma_filter_data * fdata = param ;
struct imxdma_channel * imxdma_chan = to_imxdma_chan ( chan ) ;
if ( chan - > device - > dev ! = fdata - > imxdma - > dev )
return false ;
imxdma_chan - > dma_request = fdata - > request ;
chan - > private = NULL ;
return true ;
}
static struct dma_chan * imxdma_xlate ( struct of_phandle_args * dma_spec ,
struct of_dma * ofdma )
{
int count = dma_spec - > args_count ;
struct imxdma_engine * imxdma = ofdma - > of_dma_data ;
struct imxdma_filter_data fdata = {
. imxdma = imxdma ,
} ;
if ( count ! = 1 )
return NULL ;
fdata . request = dma_spec - > args [ 0 ] ;
return dma_request_channel ( imxdma - > dma_device . cap_mask ,
imxdma_filter_fn , & fdata ) ;
}
2010-10-06 12:25:55 +04:00
/*
 * Probe: map registers, enable clocks, reset and configure the DMA
 * block, request interrupts (two shared lines on i.MX1, one line per
 * channel on i.MX21/27), set up all channels and register the dmaengine
 * device plus the OF translation hook.
 *
 * Cleanup on failure walks the goto chain in reverse acquisition order;
 * ioremap/clk_get/IRQ resources are devm-managed.
 */
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	/* DT probe path: pick the matching id_entry so devtype is valid. */
	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	/* i.MX1 has one shared DMA IRQ and one shared error IRQ. */
	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}
		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		/* i.MX21/27: one IRQ line per channel, plus a watchdog
		 * timer for stalled transfers. */
		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	/* Wire up the dmaengine operations implemented by this driver. */
	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	/* 24-bit transfer counter limits a single segment to 16 MiB - 1. */
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}
2016-07-02 12:55:01 +03:00
static void imxdma_free_irq ( struct platform_device * pdev , struct imxdma_engine * imxdma )
{
int i ;
if ( is_imx1_dma ( imxdma ) ) {
disable_irq ( imxdma - > irq ) ;
disable_irq ( imxdma - > irq_err ) ;
}
for ( i = 0 ; i < IMX_DMA_CHANNELS ; i + + ) {
struct imxdma_channel * imxdmac = & imxdma - > channel [ i ] ;
if ( ! is_imx1_dma ( imxdma ) )
disable_irq ( imxdmac - > irq ) ;
tasklet_kill ( & imxdmac - > dma_tasklet ) ;
}
}
2013-02-20 04:07:04 +04:00
/*
 * Remove: quiesce interrupts and tasklets first, then unregister from the
 * dmaengine core and the OF layer, and finally gate the clocks. The order
 * matters — no IRQ/tasklet may run once the device is unregistered and
 * the clocks are off.
 */
static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}
/*
 * Platform driver descriptor. No .probe here: imxdma_probe() is __init
 * and is bound once via platform_driver_probe() in imxdma_module_init().
 */
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};
/* Register at subsys_initcall time so DMA is available to early clients. */
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");