// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Given that those are configuration options of the core that are
 * selected when it is instantiated, they cannot be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */

#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
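
/*
 * Submit the next segment of the active descriptor to the hardware. The core
 * exposes a small transfer queue: a non-zero readback of START_TRANSFER means
 * the queue is full and submission is retried from the next SOT interrupt.
 * Must be called with the channel's vchan lock held.
 */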
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}
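
/*
 * Drain the hardware's partial-transfer reporting registers: each entry gives
 * the ID and the number of bytes actually transferred for a segment that
 * completed short (e.g. because the device side ended the transfer early).
 * The length is recorded in the matching sg so the residue can be reported
 * later.
 */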
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Not found partial segment id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
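
/*
 * Retire all segments whose IDs are flagged in the TRANSFER_DONE register,
 * completing descriptors (or wrapping them around in cyclic mode) as their
 * last segment finishes. Returns true if a segment was deferred with
 * schedule_when_free and a new transfer should be started by the caller.
 */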
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
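
/*
 * A single interrupt line signals both events: EOT (one or more segments have
 * completed, reported via TRANSFER_DONE) and SOT (space has become available
 * in the hardware transfer queue, so the next segment can be submitted).
 */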
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}
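
/*
 * Stop the core by clearing the enable bit, then reclaim all queued and
 * active descriptors.
 */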
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}
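
/*
 * Fill a run of sg entries describing num_periods contiguous periods starting
 * at addr. Each period is split into segments no longer than the core's
 * maximum transfer length, rounded up to the length alignment requirement.
 * Returns a pointer to the entry following the last one that was filled.
 */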
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
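
/*
 * Interleaved transfers map onto a single hardware segment. On cores with 2D
 * support the frame size becomes the X length, the frame count the Y length
 * and the inter-chunk gaps become the strides; without 2D support the frames
 * must be contiguous and are collapsed into one 1D segment.
 */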
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}
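
/*
 * Only the registers listed here are exposed through the MMIO regmap that is
 * registered at probe time; everything else in the register space is treated
 * as inaccessible.
 */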
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}
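
/*
 * Starting with core version 4.3.a the channel configuration (bus types and
 * bus widths) can be read back from the interface description register, so
 * the devicetree description is no longer needed for it.
 */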
static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}
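
/*
 * Optional features are detected by writing to the corresponding registers
 * and reading the values back: registers belonging to features that are not
 * implemented in this instance of the core read back as zero.
 */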
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");