/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
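
/*
 * Illustrative note (expansion shown here is an explanation, not taken from
 * the hardware spec): for a descriptor field pair such as
 * hw->src_addr/hw->src_addr_msb, xilinx_prep_dma_addr_t(hw->src_addr)
 * expands via token pasting to
 *	((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr)))
 * i.e. it recombines the split LSB/MSB fields into a single dma_addr_t.
 */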

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @ 0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits (ex: 0x5c), we are writing as two separate 32 bits
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
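
/*
 * Illustrative use only (the call-site parameters below are an assumption,
 * not a quote from a specific prep routine): for a scatter-gather entry on
 * a 64-bit capable channel,
 *	xilinx_axidma_buf(chan, hw, sg_dma_address(sg), sg_used, 0);
 * fills hw->buf_addr with the lower 32 bits of the bus address and
 * hw->buf_addr_msb with the upper 32 bits; on a 32-bit channel only
 * hw->buf_addr is programmed.
 */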

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
				struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free Memory that is allocated for cyclic DMA Mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_zalloc_coherent(chan->dev,
						  sizeof(*chan->seg_v) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
					sizeof(*chan->cyclic_seg_v),
					&chan->cyclic_seg_p, GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				XILINX_DMA_NUM_DESCS, chan->seg_v,
				chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		/* Link the BDs into a circular chain on the free list */
		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA resetting once channel will reset the
		 * other channel as well so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}
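
/*
 * Worked example (the numbers are assumptions for illustration, not values
 * read from hardware): with max_buffer_len = 0x3FFFFFF and copy_align = 3
 * (8-byte alignment), a request of size = 0x4000010 with done = 0 is first
 * clamped to 0x3FFFFFF; since more data remains after this chunk, it is
 * rounded down to 0x3FFFFF8 so the following descriptor starts on an
 * aligned boundary.
 */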

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   chan->xdev->max_buffer_len;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	}

	chan->idle = false;
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
			     xilinx_prep_dma_addr_t(hw->src_addr));
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
			     xilinx_prep_dma_addr_t(hw->dest_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	if (!chan->idle)
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->idle = true;
	chan->desc_submitcount = 0;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR
		 * register, so make sure not to write 1 to the other
		 * error bits.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * The device takes too long to do the transfer when the user
		 * requires responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, the channel is no longer functional and
		 * the system needs a hard reset.
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
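
/*
 * Illustrative sketch (not part of this driver): a video client would
 * normally reach the callback above through the generic dmaengine helper
 * dmaengine_prep_interleaved_dma(). The frame geometry and variable names
 * below are placeholder assumptions for illustration only.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->dir = DMA_DEV_TO_MEM;
 *	xt->numf = height;			// lines per frame
 *	xt->frame_size = 1;
 *	xt->dst_start = buf_phys;
 *	xt->sgl[0].size = width_bytes;		// active line length
 *	xt->sgl[0].icg = stride_bytes - width_bytes;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */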

/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
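
/*
 * Illustrative sketch (not part of this driver): a CDMA client does not call
 * the callback above directly; it goes through the generic dmaengine memcpy
 * API. The addresses, length and callback names are placeholder assumptions.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_memcpy_done;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */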

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
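
/*
 * Illustrative sketch (not part of this driver): an AXI DMA client maps its
 * buffer and submits it through the generic slave-sg helper. The device
 * pointer, buffer and callback names are placeholder assumptions.
 *
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	sg_init_one(&sg, vaddr, len);
 *	if (dma_map_sg(dma_dev, &sg, 1, DMA_TO_DEVICE) != 1)
 *		return -EIO;
 *	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */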

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32)head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
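
/*
 * Illustrative sketch (not part of this driver): a cyclic client (for
 * example an audio-style ring buffer) sets up the periods through the
 * generic helper. The ring address, sizes and callback are placeholder
 * assumptions.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, ring_phys, ring_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_period_elapsed;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */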

/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
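
/*
 * Illustrative sketch (not part of this driver): a video client that owns a
 * VDMA channel tunes it through this exported helper. The field values below
 * are placeholder assumptions for illustration only.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_dly = 0,
 *		.gen_lock = 0,
 *		.park = 0,
 *		.coalesc = 1,
 *		.delay = 0,
 *		.reset = 0,
 *	};
 *	int ret;
 *
 *	ret = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (ret)
 *		dev_err(dev, "VDMA channel config failed: %d\n", ret);
 */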

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes special channel handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This flag ensures that descriptors are not submitted while the
	 * DMA engine is in progress. It avoids polling for a bit in the
	 * status register to know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
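
/*
 * Illustrative sketch (not part of this driver): a consumer references this
 * controller with a one-cell specifier (the channel index matched above) and
 * then requests the channel by name. The "rx" name is a placeholder
 * assumption taken from the consumer's dma-names property.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */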

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	xdev->max_buffer_len = XILINX_DMA_MAX_TRANS_LEN;

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");