/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
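/*
 * Illustrative usage sketch (editorial, not part of the driver): how a DMA
 * client is typically expected to drive one of these channels through the
 * generic dmaengine API. The device pointer, channel name and frame geometry
 * below are assumptions made for the example only.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "vdma0");		// channel name assumed
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *
 *	xt->dir = DMA_DEV_TO_MEM;			// S2MM capture
 *	xt->dst_start = frame_buf_dma_addr;		// frame buffer address
 *	xt->numf = height;				// lines per frame (vsize)
 *	xt->frame_size = 1;				// this driver takes one chunk
 *	xt->sgl[0].size = width_bytes;			// bytes per line (hsize)
 *	xt->sgl[0].icg = stride_bytes - width_bytes;	// inter-line gap
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
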
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE		0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR		0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	u32 flush_on_fsync;
	bool ext_addr;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits (ex: 0x5c), we are writing as two separate 32 bits
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;

	if (!desc)
		return;

	list_for_each_entry_safe(segment, next, &desc->segments, node) {
		list_del(&segment->node);
		xilinx_vdma_free_tx_segment(chan, segment);
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				chan->dev,
				sizeof(struct xilinx_vdma_tx_segment),
				__alignof__(struct xilinx_vdma_tx_segment), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);
	return 0;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		 XILINX_DMA_DMASR_HALTED) &&
		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
		 XILINX_DMA_DMACR_RUNSTOP);
}

/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		XILINX_DMA_DMASR_IDLE;
}

/**
 * xilinx_dma_halt - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      (val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* Configure the channel with the number of frame buffers in use */
	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
		       chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
		else
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
	}

	if (!chan->has_sg) {
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	} else {
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	}
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_vdma_start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		xilinx_vdma_start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);
	tail_segment->hw.next_desc = (u32)desc->async_tx.phys;

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg &&
	    unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);
	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: DMA channel pointer
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Halt the DMA engine */
	xilinx_dma_halt(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
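
/*
 * Illustrative sketch only (frame index and policy below are assumed, not
 * mandated by the driver): a client that wants the VDMA to keep repeating a
 * single frame buffer can request parking at run time through the export
 * above, while also asking for an interrupt per completed frame:
 *
 *	struct xilinx_vdma_config cfg = { };
 *
 *	cfg.park = 1;
 *	cfg.park_frm = 2;	// park on frame buffer 2 (assumed index)
 *	cfg.coalesc = 1;	// frame-count interrupt after every frame
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */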
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = 0;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

		if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
		    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
			chan->flush_on_fsync = true;
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = 1;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

		if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
		    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
			chan->flush_on_fsync = true;
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE)
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");

	err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
	if (err < 0) {
		dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
		return err;
	}

	err = of_property_read_u32(node, "xlnx,flush-fsync",
				   &xdev->flush_on_fsync);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_chan_probe(xdev, child);
		if (err < 0)
			goto error;
	}

	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xdev->chan[i]->num_frms = num_frames;

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

error:
	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-vdma-1.00.a",},
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");