// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 * Copyright (c) 2017 Linaro Ltd.
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@linaro.org>
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in the following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry. Unsupported by PL080S.
 */

#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @config_offset: offset to the configuration register
 * @channels: the number of channels available in this variant
 * @signals: the number of request signals available from the hardware
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether this variant is a ST Microelectronics Nomadik, where the
 *	channels have Nomadik security extension bits that need to be checked
 *	for permission before use and some registers are missing
 * @pl080s: whether this variant is a Samsung PL080S, which has separate
 *	register and LLI word for transfer size.
 * @ftdmac020: whether this variant is a Faraday Technology FTDMAC020
 * @max_transfer_size: the maximum single element transfer size for this
 *	PL08x variant.
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	u8 signals;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	bool ftdmac020;
	u32 max_transfer_size;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @reg_config: configuration address for this physical channel
 * @reg_control: control address for this physical channel
 * @reg_src: transfer source address register
 * @reg_dst: transfer destination address register
 * @reg_lli: transfer LLI address register
 * @reg_busy: if the variant has a special per-channel busy register,
 *	this contains a pointer to it
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 *	channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 *	world
 * @ftdmac020: channel is on an FTDMAC020
 * @pl080s: channel is on a PL080S
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	void __iomem *reg_control;
	void __iomem *reg_src;
	void __iomem *reg_dst;
	void __iomem *reg_lli;
	void __iomem *reg_busy;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
	bool ftdmac020;
	bool pl080s;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
	bool cyclic;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 * @waiting_at: time in jiffies when this channel moved to waiting state
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
	unsigned long waiting_at;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: optional slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @has_slave: the PL08x has a slave engine (routed signals)
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 *	fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lli_words: how many words are used in each LLI item for this variant
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	bool has_slave;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};

/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8

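/*
 * Conversion helpers: the dmaengine and virt-dma cores hand us pointers
 * to their embedded structs; container_of() recovers the driver-private
 * wrappers around them.
 */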
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */

static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	/* If we have a special busy register, take a shortcut */
	if (ch->reg_busy) {
		val = readl(ch->reg_busy);
		return !!(val & BIT(ch->id));
	}
	val = readl(ch->reg_config);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * pl08x_write_lli() - Write an LLI into the DMA controller.
 *
 * The PL08x derivatives support linked lists, but the first item of the
 * list containing the source, destination, control word and next LLI is
 * ignored.  Instead the driver has to write those values directly into the
 * SRC, DST, LLI and control registers.  On FTDMAC020 also the SIZE
 * register needs to be set up for the first transfer.
 */
static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->reg_src);
	writel_relaxed(lli[PL080_LLI_DST], phychan->reg_dst);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->reg_lli);

	/*
	 * The FTDMAC020 has a different layout in the CCTL word of the LLI
	 * and the CCTL register which is split in CSR and SIZE registers.
	 * Convert the LLI item CCTL into the proper values to write into
	 * the CSR and SIZE registers.
	 */
	if (phychan->ftdmac020) {
		u32 llictl = lli[PL080_LLI_CCTL];
		u32 val = 0;

		/* Write the transfer size (12 bits) to the size register */
		writel_relaxed(llictl & FTDMAC020_LLI_TRANSFER_SIZE_MASK,
			       phychan->base + FTDMAC020_CH_SIZE);
		/*
		 * Then write the control bits 28..16 to the control register
		 * by shuffling the bits around to where they are in the
		 * main register.  The mapping is as follows:
		 * Bit 28: TC_MSK - mask on all except last LLI
		 * Bit 27..25: SRC_WIDTH
		 * Bit 24..22: DST_WIDTH
		 * Bit 21..20: SRCAD_CTRL
		 * Bit 19..17: DSTAD_CTRL
		 * Bit 17: SRC_SEL
		 * Bit 16: DST_SEL
		 */
		if (llictl & FTDMAC020_LLI_TC_MSK)
			val |= FTDMAC020_CH_CSR_TC_MSK;
		val |= ((llictl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
			(FTDMAC020_LLI_SRC_WIDTH_SHIFT -
			 FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
			(FTDMAC020_LLI_DST_WIDTH_SHIFT -
			 FTDMAC020_CH_CSR_DST_WIDTH_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_SRCAD_CTL_MSK) >>
			(FTDMAC020_LLI_SRCAD_CTL_SHIFT -
			 FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_DSTAD_CTL_MSK) >>
			(FTDMAC020_LLI_DSTAD_CTL_SHIFT -
			 FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT));
		if (llictl & FTDMAC020_LLI_SRC_SEL)
			val |= FTDMAC020_CH_CSR_SRC_SEL;
		if (llictl & FTDMAC020_LLI_DST_SEL)
			val |= FTDMAC020_CH_CSR_DST_SEL;

		/*
		 * Set up the bits that exist in the CSR but are not
		 * part of the LLI, i.e. only get written to the control
		 * register right here.
		 *
		 * FIXME: do not just handle memcpy, also handle slave DMA.
		 */
		switch (pl08x->pd->memcpy_burst_size) {
		default:
		case PL08X_BURST_SZ_1:
			val |= PL080_BSIZE_1 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_4:
			val |= PL080_BSIZE_4 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_8:
			val |= PL080_BSIZE_8 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_16:
			val |= PL080_BSIZE_16 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_32:
			val |= PL080_BSIZE_32 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_64:
			val |= PL080_BSIZE_64 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_128:
			val |= PL080_BSIZE_128 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_256:
			val |= PL080_BSIZE_256 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		}

		/* Protection flags */
		if (pl08x->pd->memcpy_prot_buff)
			val |= FTDMAC020_CH_CSR_PROT2;
		if (pl08x->pd->memcpy_prot_cache)
			val |= FTDMAC020_CH_CSR_PROT3;
		/* We are the kernel, so we are in privileged mode */
		val |= FTDMAC020_CH_CSR_PROT1;

		writel_relaxed(val, phychan->reg_control);
	} else {
		/* Bits are just identical */
		writel_relaxed(lli[PL080_LLI_CCTL], phychan->reg_control);
	}

	/* Second control word on the PL080s */
	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	writel(ccfg, phychan->reg_config);
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	if (phychan->ftdmac020) {
		val = readl(phychan->reg_config);
		while (val & FTDMAC020_CH_CFG_BUSY)
			val = readl(phychan->reg_config);

		val = readl(phychan->reg_control);
		while (val & FTDMAC020_CH_CSR_EN)
			val = readl(phychan->reg_control);

		writel(val | FTDMAC020_CH_CSR_EN,
		       phychan->reg_control);
	} else {
		val = readl(phychan->reg_config);
		while ((val & PL080_CONFIG_ACTIVE) ||
		       (val & PL080_CONFIG_ENABLE))
			val = readl(phychan->reg_config);

		writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
	}
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	if (ch->ftdmac020) {
		/* Use the enable bit on the FTDMAC020 */
		val = readl(ch->reg_control);
		val &= ~FTDMAC020_CH_CSR_EN;
		writel(val, ch->reg_control);
		return;
	}

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->reg_config);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->reg_config);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Use the enable bit on the FTDMAC020 */
	if (ch->ftdmac020) {
		val = readl(ch->reg_control);
		val |= FTDMAC020_CH_CSR_EN;
		writel(val, ch->reg_control);
		return;
	}

	/* Clear the HALT bit */
	val = readl(ch->reg_config);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->reg_config);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val;

	/* The layout for the FTDMAC020 is different */
	if (ch->ftdmac020) {
		/* Disable all interrupts */
		val = readl(ch->reg_config);
		val |= (FTDMAC020_CH_CFG_INT_ABT_MASK |
			FTDMAC020_CH_CFG_INT_ERR_MASK |
			FTDMAC020_CH_CFG_INT_TC_MASK);
		writel(val, ch->reg_config);

		/* Abort and disable channel */
		val = readl(ch->reg_control);
		val &= ~FTDMAC020_CH_CSR_EN;
		val |= FTDMAC020_CH_CSR_ABT;
		writel(val, ch->reg_control);

		/* Clear ABT and ERR interrupt flags */
		writel(BIT(ch->id) | BIT(ch->id + 16),
		       pl08x->base + PL080_ERR_CLEAR);
		writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);

		return;
	}

	val = readl(ch->reg_config);
	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);
	writel(val, ch->reg_config);

	writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
}

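/*
 * Compute the number of bytes left in the transfer currently loaded in
 * the hardware.  The transfer size field counts source-width units, so
 * the raw count is scaled by the source width below: e.g. a count of 64
 * with a 32-bit source width means 64 * 4 = 256 bytes remaining.
 */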
static u32 get_bytes_in_phy_channel(struct pl08x_phy_chan *ch)
{
	u32 val;
	u32 bytes;

	if (ch->ftdmac020) {
		bytes = readl(ch->base + FTDMAC020_CH_SIZE);

		val = readl(ch->reg_control);
		val &= FTDMAC020_CH_CSR_SRC_WIDTH_MSK;
		val >>= FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT;
	} else if (ch->pl080s) {
		val = readl(ch->base + PL080S_CH_CONTROL2);
		bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;

		val = readl(ch->reg_control);
		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	} else {
		/* Plain PL08x */
		val = readl(ch->reg_control);
		bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;

		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	}

	switch (val) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

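/*
 * As above, but for an LLI still sitting in memory: pick the transfer
 * size and source width out of the LLI words instead of the channel
 * registers.
 */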
static u32 get_bytes_in_lli(struct pl08x_phy_chan *ch, const u32 *llis_va)
{
	u32 val;
	u32 bytes;

	if (ch->ftdmac020) {
		val = llis_va[PL080_LLI_CCTL];
		bytes = val & FTDMAC020_LLI_TRANSFER_SIZE_MASK;

		val = llis_va[PL080_LLI_CCTL];
		val &= FTDMAC020_LLI_SRC_WIDTH_MSK;
		val >>= FTDMAC020_LLI_SRC_WIDTH_SHIFT;
	} else if (ch->pl080s) {
		val = llis_va[PL080S_LLI_CCTL2];
		bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;

		val = llis_va[PL080_LLI_CCTL];
		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	} else {
		/* Plain PL08x */
		val = llis_va[PL080_LLI_CCTL];
		bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;

		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	}

	switch (val) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	const u32 *llis_va, *llis_va_limit;
	struct pl08x_phy_chan *ch;
	dma_addr_t llis_bus;
	struct pl08x_txd *txd;
	u32 llis_max_words;
	size_t bytes;
	u32 clli;

	ch = plchan->phychan;
	txd = plchan->at;

	if (!ch || !txd)
		return 0;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	clli = readl(ch->reg_lli) & ~PL080_LLI_LM_AHB2;

	/* First get the remaining bytes in the active transfer */
	bytes = get_bytes_in_phy_channel(ch);

	if (!clli)
		return bytes;

	llis_va = txd->llis_va;
	llis_bus = txd->llis_bus;

	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
	BUG_ON(clli < llis_bus || clli >= llis_bus +
						sizeof(u32) * llis_max_words);

	/*
	 * Locate the next LLI - as this is an array,
	 * it's simple maths to find.
	 */
	llis_va += (clli - llis_bus) / sizeof(u32);

	llis_va_limit = llis_va + llis_max_words;

	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
		bytes += get_bytes_in_lli(ch, llis_va);

		/*
		 * A LLI pointer going backward terminates the LLI list
		 */
		if (llis_va[PL080_LLI_LLI] <= clli)
			break;
	}

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting_at = jiffies;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

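/*
 * Hand a physical channel we already hold directly to another waiting
 * virtual channel and start its next descriptor, without going through
 * release and reallocation.
 */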
static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;
	unsigned long waiting_at;
retry:
	next = NULL;
	waiting_at = jiffies;

	/*
	 * Find a waiting virtual channel for the next transfer.
	 * To be fair, the time when each channel reached the waiting state
	 * is compared to select the channel that has been waiting the
	 * longest.
	 */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING &&
		    p->waiting_at <= waiting_at) {
			next = p;
			waiting_at = p->waiting_at;
		}

	if (!next && pl08x->has_slave) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING &&
			    p->waiting_at <= waiting_at) {
				next = p;
				waiting_at = p->waiting_at;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}

/*
 * LLI handling
 */

static inline unsigned int
pl08x_get_bytes_for_lli(struct pl08x_driver_data *pl08x,
			u32 cctl,
			bool source)
{
	u32 val;

	if (pl08x->vd->ftdmac020) {
		if (source)
			val = (cctl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
		else
			val = (cctl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
	} else {
		if (source)
			val = (cctl & PL080_CONTROL_SWIDTH_MASK) >>
				PL080_CONTROL_SWIDTH_SHIFT;
		else
			val = (cctl & PL080_CONTROL_DWIDTH_MASK) >>
				PL080_CONTROL_DWIDTH_SHIFT;
	}

	switch (val) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x,
					 u32 cctl,
					 u8 srcwidth, u8 dstwidth,
					 size_t tsize)
{
	u32 retbits = cctl;

	/*
	 * Remove all src, dst and transfer size bits, then set the
	 * width and size according to the parameters.  The bit offsets
	 * are different in the FTDMAC020 so we need to account for this.
	 */
	if (pl08x->vd->ftdmac020) {
		retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK;
		retbits &= ~FTDMAC020_LLI_SRC_WIDTH_MSK;
		retbits &= ~FTDMAC020_LLI_TRANSFER_SIZE_MASK;

		switch (srcwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		switch (dstwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		tsize &= FTDMAC020_LLI_TRANSFER_SIZE_MASK;
		retbits |= tsize << FTDMAC020_LLI_TRANSFER_SIZE_SHIFT;
	} else {
		retbits &= ~PL080_CONTROL_DWIDTH_MASK;
		retbits &= ~PL080_CONTROL_SWIDTH_MASK;
		retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

		switch (srcwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		switch (dstwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
		retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	}

	return retbits;
}

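/*
 * Scratch state used while building the LLI chain for one descriptor:
 * the current source/destination bus positions and the number of bytes
 * still to be covered by LLIs.
 */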
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus is
 * chosen as the victim in case src & dest are not similarly aligned,
 * i.e. if, after aligning the master's address with the width
 * requirements of the transfer (by sending a few bytes byte by byte),
 * the slave is still not aligned, then its width will be reduced to
 * BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_driver_data *pl08x,
	struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus,
	u32 cctl)
{
	bool dst_incr;
	bool src_incr;

	/*
	 * The FTDMAC020 only supports memory-to-memory transfer, so
	 * source and destination always increase.
	 */
	if (pl08x->vd->ftdmac020) {
		dst_incr = true;
		src_incr = true;
	} else {
		dst_incr = !!(cctl & PL080_CONTROL_DST_INCR);
		src_incr = !!(cctl & PL080_CONTROL_SRC_INCR);
	}

	/*
	 * If either bus is not advancing, i.e. it is a peripheral, that
	 * one becomes master
	 */
	if (!dst_incr) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!src_incr) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    int num_llis, int len, u32 cctl, u32 cctl2)
{
	u32 offset = num_llis * pl08x->lli_words;
	u32 *llis_va = bd->txd->llis_va + offset;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	/* Advance the offset to next LLI. */
	offset += pl08x->lli_words;
	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
	llis_va[PL080_LLI_CCTL] = cctl;
	if (pl08x->vd->pl080s)
		llis_va[PL080S_LLI_CCTL2] = cctl2;

	if (pl08x->vd->ftdmac020) {
		/* FIXME: only memcpy so far so both increase */
		bd->srcbus.addr += len;
		bd->dstbus.addr += len;
	} else {
		if (cctl & PL080_CONTROL_SRC_INCR)
			bd->srcbus.addr += len;
		if (cctl & PL080_CONTROL_DST_INCR)
			bd->dstbus.addr += len;
	}

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

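/*
 * Emit a single LLI that moves 'len' bytes one byte at a time; used for
 * the unaligned head and tail of a transfer.
 */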
static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
			struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
			int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_lli_control_bits(pl08x, *cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
	(*total_bytes) += len;
}

#if 1
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
			   const u32 *llis_va, int num_llis)
{
	int i;

	if (pl08x->vd->pl080s) {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s  %-10s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL],
				llis_va[PL080S_LLI_CCTL2]);
			llis_va += pl08x->lli_words;
		}
	} else {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s  %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL]);
			llis_va += pl08x->lli_words;
		}
	}
}
#else
static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
				  const u32 *llis_va, int num_llis) {}
#endif

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	u32 *llis_va, *last_lli;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, true);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, false);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(pl08x, &bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
			(u64)bd.srcbus.addr,
			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			(u64)bd.dstbus.addr,
			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   memory address and zero length. We pass this to the
		 *   controller, and after the transfer it will receive the
		 *   last burst request from the peripheral and so the
		 *   transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, since the DMA controller doesn't know when an LLI's
		 *   transfer is over, it can't load the next LLI. So in this
		 *   case, there has to be an assumption that only one LLI is
		 *   supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc;

			/* FTDMAC020 only does memory-to-memory */
			if (pl08x->vd->ftdmac020)
				fc = PL080_FLOW_MEM2MEM;
			else
				fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
					PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
				!IS_BUS_ALIGNED(&bd.dstbus)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_lli_control_bits(pl08x, cctl,
					bd.srcbus.buswidth, bd.dstbus.buswidth,
					0);
			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
					0, cctl, 0);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if (!IS_BUS_ALIGNED(mbus)) {
			early_bytes = mbus->buswidth -
				(mbus->addr & (mbus->buswidth - 1));
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08zx)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
				num_llis++, &total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (!IS_BUS_ALIGNED(sbus)) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
						pl08x->vd->max_transfer_size;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_lli_control_bits(pl08x, cctl,
					bd.srcbus.buswidth, bd.dstbus.buswidth,
					tsize);
				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
						lli_len, cctl, tsize);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(pl08x, &bd, &cctl,
					bd.remainder, num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;

	if (txd->cyclic) {
		/* Link back to the first LLI. */
		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
	} else {
		/* The final LLI terminates the LLI. */
		last_lli[PL080_LLI_LLI] = 0;
		/* The final LLI element shall also fire an interrupt. */
		if (pl08x->vd->ftdmac020)
			last_lli[PL080_LLI_CCTL] &= ~FTDMAC020_LLI_TC_MSK;
		else
			last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
	}

	pl08x_dump_lli(pl08x, llis_va, num_llis);

	return num_llis;
}

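/* Free the LLI pool memory and every child sg entry owned by one txd */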
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	dma_descriptor_unmap(&vd->tx);
	if (!txd->done)
		pl08x_release_mux(plchan);
	pl08x_free_txd(plchan->host, txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}

/*
 * The DMA ENGINE API
 */
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port. We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(bool ftdmac020, u8 src, u8 dst)
{
	u32 cctl = 0;
	u32 dst_ahb2;
	u32 src_ahb2;

	/* The FTDMAC020 uses different bits to indicate src/dst bus */
	if (ftdmac020) {
		dst_ahb2 = FTDMAC020_LLI_DST_SEL;
		src_ahb2 = FTDMAC020_LLI_SRC_SEL;
	} else {
		dst_ahb2 = PL080_CONTROL_DST_AHB2;
		src_ahb2 = PL080_CONTROL_SRC_AHB2;
	}

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= dst_ahb2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= src_ahb2;

	return cctl;
}
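/*
 * Worked example of the selection above: with src = PL08X_AHB1 | PL08X_AHB2
 * and dst = PL08X_AHB2, the destination is forced onto AHB2 (it cannot use
 * AHB1) while the source stays on AHB1, giving each end its own master port.
 */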
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}
static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}
static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}
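/*
 * burst_sizes[] is ordered from largest to smallest, so the scan above
 * returns the biggest burst that does not exceed the requested maximum:
 * e.g. maxburst = 20 falls past 256/128/64/32 and selects PL080_BSIZE_16.
 * The terminating { 0, PL080_BSIZE_1 } entry guarantees the loop always
 * finds a match, so the lookup after the loop cannot run off the table.
 */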
static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
			  enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element. Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}
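/*
 * Example: a slave configured for 32-bit accesses with maxburst = 16 ends
 * up with PL080_WIDTH_32BIT in both width fields and PL080_BSIZE_16 in both
 * burst fields, plus PL080_CONTROL_PROT_SYS from pl08x_cctl(); the AHB2 and
 * INCR bits are cleared here and filled in later by the prep routines.
 */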
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd)
		INIT_LIST_HEAD(&txd->dsg_list);

	return txd;
}
static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x)
{
	u32 cctl = 0;

	/* Conjure cctl */
	switch (pl08x->pd->memcpy_burst_size) {
	default:
		dev_err(&pl08x->adev->dev,
			"illegal burst size for memcpy, set to 1\n");
		fallthrough;
	case PL08X_BURST_SZ_1:
		cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_4:
		cctl |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_8:
		cctl |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_16:
		cctl |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_32:
		cctl |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_64:
		cctl |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_128:
		cctl |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_256:
		cctl |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	}

	switch (pl08x->pd->memcpy_bus_width) {
	default:
		dev_err(&pl08x->adev->dev,
			"illegal bus width for memcpy, set to 8 bits\n");
		fallthrough;
	case PL08X_BUS_WIDTH_8_BITS:
		cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case PL08X_BUS_WIDTH_16_BITS:
		cctl |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case PL08X_BUS_WIDTH_32_BITS:
		cctl |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	/* Protection flags */
	if (pl08x->pd->memcpy_prot_buff)
		cctl |= PL080_CONTROL_PROT_BUFF;
	if (pl08x->pd->memcpy_prot_cache)
		cctl |= PL080_CONTROL_PROT_CACHE;

	/* We are the kernel, so we are in privileged mode */
	cctl |= PL080_CONTROL_PROT_SYS;

	/* Both to be incremented or the code will break */
	cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		cctl |= pl08x_select_bus(false,
					 pl08x->mem_buses,
					 pl08x->mem_buses);

	return cctl;
}
static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x)
{
	u32 cctl = 0;

	/* Conjure cctl */
	switch (pl08x->pd->memcpy_bus_width) {
	default:
		dev_err(&pl08x->adev->dev,
			"illegal bus width for memcpy, set to 8 bits\n");
		fallthrough;
	case PL08X_BUS_WIDTH_8_BITS:
		cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
			PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
		break;
	case PL08X_BUS_WIDTH_16_BITS:
		cctl |= PL080_WIDTH_16BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
			PL080_WIDTH_16BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
		break;
	case PL08X_BUS_WIDTH_32_BITS:
		cctl |= PL080_WIDTH_32BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
			PL080_WIDTH_32BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
		break;
	}

	/*
	 * By default mask the TC IRQ on all LLIs, it will be unmasked on
	 * the last LLI item by other code.
	 */
	cctl |= FTDMAC020_LLI_TC_MSK;

	/*
	 * Both to be incremented so leave bits FTDMAC020_LLI_SRCAD_CTL
	 * and FTDMAC020_LLI_DSTAD_CTL as zero
	 */
	if (pl08x->vd->dualmaster)
		cctl |= pl08x_select_bus(true,
					 pl08x->mem_buses,
					 pl08x->mem_buses);

	return cctl;
}
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;
	if (pl08x->vd->ftdmac020) {
		/* Writing CCFG zero ENABLES all interrupts */
		txd->ccfg = 0;
		txd->cctl = pl08x_ftdmac020_memcpy_cctl(pl08x);
	} else {
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			PL080_CONFIG_TC_IRQ_MASK |
			PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = pl08x_memcpy_cctl(pl08x);
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
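/*
 * A minimal client-side sketch of how this prep routine is reached
 * through the generic dmaengine API (chan, dst, src and len here are
 * hypothetical, supplied by the client driver):
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */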
static struct pl08x_txd *pl08x_init_txd(
		struct dma_chan *chan,
		enum dma_transfer_direction direction,
		dma_addr_t *slave_addr)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	enum dma_slave_buswidth addr_width;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		*slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		*slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(false, src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
		PL080_CONFIG_TC_IRQ_MASK |
		tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	return txd;
}
static int pl08x_tx_add_sg(struct pl08x_txd *txd,
			   enum dma_transfer_direction direction,
			   dma_addr_t slave_addr,
			   dma_addr_t buf_addr,
			   unsigned int len)
{
	struct pl08x_sg *dsg;

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg)
		return -ENOMEM;

	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->len = len;
	if (direction == DMA_MEM_TO_DEV) {
		dsg->src_addr = buf_addr;
		dsg->dst_addr = slave_addr;
	} else {
		dsg->src_addr = slave_addr;
		dsg->dst_addr = buf_addr;
	}

	return 0;
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct scatterlist *sg;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, tmp) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      sg_dma_address(sg),
				      sg_dma_len(sg));
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev,
		"%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
		__func__, period_len, buf_len,
		direction == DMA_MEM_TO_DEV ? "to" : "from",
		plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	txd->cyclic = true;
	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
	for (tmp = 0; tmp < buf_len; tmp += period_len) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      buf_addr + tmp, period_len);
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
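/*
 * Design note on the cyclic case: the transfer is modelled as one sg entry
 * per period, and pl08x_fill_llis_for_desc() links the final LLI back to
 * the first (txd->cyclic), so the hardware loops over the ring and fires a
 * TC interrupt per period via PL080_CONTROL_TC_IRQ_EN, which the IRQ
 * handler turns into vchan_cyclic_callback() calls.
 */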
static int pl08x_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (config->device_fc && pl08x->vd->pl080s) {
		dev_err(&pl08x->adev->dev,
			"%s: PL080S does not support peripheral flow control\n",
			__func__);
		return -EINVAL;
	}

	plchan->cfg = *config;

	return 0;
}
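/*
 * A sketch of the slave configuration this hook consumes, as a client
 * would pass it via dmaengine_slave_config(); the address and widths
 * below are illustrative only:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = periph_fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */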
static int pl08x_terminate_all(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	plchan->state = PL08X_CHAN_IDLE;

	if (plchan->phychan) {
		/*
		 * Mark physical channel as free and free any slave
		 * signal
		 */
		pl08x_phy_free(plchan);
	}
	/* Dequeue jobs and free LLIs */
	if (plchan->at) {
		vchan_terminate_vdesc(&plchan->at->vd);
		plchan->at = NULL;
	}
	/* Dequeue jobs not yet fired as well */
	pl08x_free_txd_list(pl08x, plchan);

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
static void pl08x_synchronize(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	vchan_synchronize(&plchan->vc);
}
static int pl08x_pause(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_pause_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
static int pl08x_resume(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_resume_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_RUNNING;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
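/*
 * Typical use from a client driver, requesting a named channel through
 * the legacy filter interface ("ssp0_tx" is a hypothetical bus_id):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "ssp0_tx");
 */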
static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	return plchan->cd == chan_id;
}
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;

	/* The FTDMAC020 variant does this in another register */
	if (pl08x->vd->ftdmac020) {
		writel(PL080_CONFIG_ENABLE, pl08x->base + FTDMAC020_CSR);
		return;
	}

	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((BIT(i) & err) || (BIT(i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx && tx->cyclic) {
				vchan_cyclic_callback(&tx->vd);
			} else if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= BIT(i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
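/*
 * Note on the handler above: ERR and TC status are gathered and cleared up
 * front, then each set bit is mapped back to its physical channel and the
 * virtual channel it is serving; cyclic descriptors get a per-period
 * callback, while ordinary ones complete and hand the physical channel to
 * the next queued descriptor, if any.
 */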
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
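/*
 * The helper above seeds a slave channel's runtime config with the fixed
 * FIFO address from its platform channel data; a later
 * dmaengine_slave_config() call from the client overwrites the whole cfg.
 */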
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels:
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			/*
			 * Some implementations have muxed signals, whereas some
			 * use a mux in front of the signals and need dynamic
			 * assignment of signals.
			 */
			chan->signal = i;
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = kzalloc(sizeof(*chan->cd), GFP_KERNEL);
			if (!chan->cd) {
				kfree(chan);
				return -ENOMEM;
			}
			chan->cd->bus_id = "memcpy";
			chan->cd->periph_buses = pl08x->pd->mem_buses;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan->cd);
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	if (pl08x->has_slave) {
		seq_printf(s, "\nPL08x virtual slave channels:\n");
		seq_printf(s, "CHANNEL:\tSTATE:\n");
		seq_printf(s, "--------\t------\n");
		list_for_each_entry(chan, &pl08x->slave.channels,
				    vc.chan.device_node) {
			seq_printf(s, "%s\t\t%s\n", chan->name,
				   pl08x_state_str(chan->state));
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view the channel states */
	debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			    NULL, pl08x, &pl08x_debugfs_fops);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
#ifdef CONFIG_OF
static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
					   u32 id)
{
	struct pl08x_dma_chan *chan;

	/* Trying to get a slave channel from something with no slave support */
	if (!pl08x->has_slave)
		return NULL;

	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		if (chan->signal == id)
			return &chan->vc.chan;
	}

	return NULL;
}
static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
	struct dma_chan *dma_chan;
	struct pl08x_dma_chan *plchan;

	if (!pl08x)
		return NULL;

	if (dma_spec->args_count != 2) {
		dev_err(&pl08x->adev->dev,
			"DMA channel translation requires two cells\n");
		return NULL;
	}

	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
	if (!dma_chan) {
		dev_err(&pl08x->adev->dev,
			"DMA slave channel not found\n");
		return NULL;
	}

	plchan = to_pl08x_chan(dma_chan);
	dev_dbg(&pl08x->adev->dev,
		"translated channel for signal %d\n",
		dma_spec->args[0]);

	/* Augment channel data for applicable AHB buses */
	plchan->cd->periph_buses = dma_spec->args[1];
	return dma_get_slave_channel(dma_chan);
}
static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	struct pl08x_channel_data *chanp = NULL;
	u32 val;
	int ret;
	int i;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		fallthrough;
	case 1:
		pd->memcpy_burst_size = PL08X_BURST_SZ_1;
		break;
	case 4:
		pd->memcpy_burst_size = PL08X_BURST_SZ_4;
		break;
	case 8:
		pd->memcpy_burst_size = PL08X_BURST_SZ_8;
		break;
	case 16:
		pd->memcpy_burst_size = PL08X_BURST_SZ_16;
		break;
	case 32:
		pd->memcpy_burst_size = PL08X_BURST_SZ_32;
		break;
	case 64:
		pd->memcpy_burst_size = PL08X_BURST_SZ_64;
		break;
	case 128:
		pd->memcpy_burst_size = PL08X_BURST_SZ_128;
		break;
	case 256:
		pd->memcpy_burst_size = PL08X_BURST_SZ_256;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		fallthrough;
	case 8:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS;
		break;
	case 16:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_16_BITS;
		break;
	case 32:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS;
		break;
	}
	/*
	 * Allocate channel data for all possible slave channels (one
	 * for each possible signal), channels will then be allocated
	 * for a device and have their AHB interfaces set up at
	 * translation time.
	 */
	if (pl08x->vd->signals) {
		chanp = devm_kcalloc(&adev->dev,
				     pl08x->vd->signals,
				     sizeof(struct pl08x_channel_data),
				     GFP_KERNEL);
		if (!chanp)
			return -ENOMEM;

		pd->slave_channels = chanp;
		for (i = 0; i < pl08x->vd->signals; i++) {
			/*
			 * chanp->periph_buses will be assigned at translation
			 */
			chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
			chanp++;
		}
		pd->num_slave_channels = pl08x->vd->signals;
	}

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
#else
static inline int pl08x_of_probe(struct amba_device *adev,
				 struct pl08x_driver_data *pl08x,
				 struct device_node *np)
{
	return -EINVAL;
}
#endif
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	struct vendor_data *vd = id->data;
	struct device_node *np = adev->dev.of_node;
	u32 tsfr_size;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Ensure that we can do DMA */
	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto out_no_pl08x;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	if (vd->ftdmac020) {
		u32 val;

		val = readl(pl08x->base + FTDMAC020_REVISION);
		dev_info(&pl08x->adev->dev, "FTDMAC020 %d.%d rel %d\n",
			 (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
		val = readl(pl08x->base + FTDMAC020_FEATURE);
		dev_info(&pl08x->adev->dev, "FTDMAC020 %d channels, "
			 "%s built-in bridge, %s, %s linked lists\n",
			 (val >> 12) & 0x0f,
			 (val & BIT(10)) ? "no" : "has",
			 (val & BIT(9)) ? "AHB0 and AHB1" : "AHB0",
			 (val & BIT(8)) ? "supports" : "does not support");

		/* Vendor data from feature register */
		if (!(val & BIT(8)))
			dev_warn(&pl08x->adev->dev,
				 "linked lists not supported, required\n");
		vd->channels = (val >> 12) & 0x0f;
		vd->dualmaster = !!(val & BIT(9));
	}
	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_config = pl08x_config;
	pl08x->memcpy.device_pause = pl08x_pause;
	pl08x->memcpy.device_resume = pl08x_resume;
	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
	pl08x->memcpy.device_synchronize = pl08x_synchronize;
	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	if (vd->ftdmac020)
		pl08x->memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;

	/*
	 * Initialize slave engine, if the block has no signals, that means
	 * we have no slave support.
	 */
	if (vd->signals) {
		pl08x->has_slave = true;
		dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
		dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
		pl08x->slave.dev = &adev->dev;
		pl08x->slave.device_free_chan_resources =
			pl08x_free_chan_resources;
		pl08x->slave.device_prep_dma_interrupt =
			pl08x_prep_dma_interrupt;
		pl08x->slave.device_tx_status = pl08x_dma_tx_status;
		pl08x->slave.device_issue_pending = pl08x_issue_pending;
		pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
		pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
		pl08x->slave.device_config = pl08x_config;
		pl08x->slave.device_pause = pl08x_pause;
		pl08x->slave.device_resume = pl08x_resume;
		pl08x->slave.device_terminate_all = pl08x_terminate_all;
		pl08x->slave.device_synchronize = pl08x_synchronize;
		pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
		pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
		pl08x->slave.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		pl08x->slave.residue_granularity =
			DMA_RESIDUE_GRANULARITY_SEGMENT;
	}
	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		if (np) {
			ret = pl08x_of_probe(adev, pl08x, np);
			if (ret)
				goto out_no_platdata;
		} else {
			dev_err(&adev->dev, "no platform data supplied\n");
			ret = -EINVAL;
			goto out_no_platdata;
		}
	} else {
		pl08x->slave.filter.map = pl08x->pd->slave_map;
		pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
		pl08x->slave.filter.fn = pl08x_filter_fn;
	}

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	if (vd->pl080s)
		pl08x->lli_words = PL080S_LLI_WORDS;
	else
		pl08x->lli_words = PL080_LLI_WORDS;
	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      tsfr_size, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts */
	if (vd->ftdmac020)
		/* This variant has error IRQs in bits 16-19 */
		writel(0x0000FFFF, pl08x->base + PL080_ERR_CLEAR);
	else
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	/* Attach the interrupt handler */
	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}
	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		if (vd->ftdmac020) {
			/* FTDMA020 has a special channel busy register */
			ch->reg_busy = ch->base + FTDMAC020_CH_BUSY;
			ch->reg_config = ch->base + FTDMAC020_CH_CFG;
			ch->reg_control = ch->base + FTDMAC020_CH_CSR;
			ch->reg_src = ch->base + FTDMAC020_CH_SRC_ADDR;
			ch->reg_dst = ch->base + FTDMAC020_CH_DST_ADDR;
			ch->reg_lli = ch->base + FTDMAC020_CH_LLP;
			ch->ftdmac020 = true;
		} else {
			ch->reg_config = ch->base + vd->config_offset;
			ch->reg_control = ch->base + PL080_CH_CONTROL;
			ch->reg_src = ch->base + PL080_CH_SRC_ADDR;
			ch->reg_dst = ch->base + PL080_CH_DST_ADDR;
			ch->reg_lli = ch->base + PL080_CH_LLI;
		}
		if (vd->pl080s)
			ch->pl080s = true;

		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->reg_config);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}

	/* Register slave channels */
	if (pl08x->has_slave) {
		ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					pl08x->pd->num_slave_channels, true);
		if (ret < 0) {
			dev_warn(&pl08x->adev->dev,
				 "%s failed to enumerate slave channels - %d\n",
				 __func__, ret);
			goto out_no_slave;
		}
	}
	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	if (pl08x->has_slave) {
		ret = dma_async_device_register(&pl08x->slave);
		if (ret) {
			dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
			goto out_no_slave_reg;
		}
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	if (pl08x->has_slave)
		pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	iounmap(pl08x->base);
out_no_ioremap:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 16,
	.dualmaster = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_nomadik = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.dualmaster = true,
	.nomadik = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl080s = {
	.config_offset = PL080S_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.pl080s = true,
	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl081 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 2,
	.signals = 16,
	.dualmaster = false,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_ftdmac020 = {
	.config_offset = PL080_CH_CONFIG,
	.ftdmac020 = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static const struct amba_id pl08x_ids[] = {
	/* Samsung PL080S variant */
	{
		.id = 0x0a141080,
		.mask = 0xffffffff,
		.data = &vendor_pl080s,
	},
	/* PL080 */
	{
		.id = 0x00041080,
		.mask = 0x000fffff,
		.data = &vendor_pl080,
	},
	/* PL081 */
	{
		.id = 0x00041081,
		.mask = 0x000fffff,
		.data = &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id = 0x00280080,
		.mask = 0x00ffffff,
		.data = &vendor_nomadik,
	},
	/* Faraday Technology FTDMAC020 */
	{
		.id = 0x0003b080,
		.mask = 0x000fffff,
		.data = &vendor_ftdmac020,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);
static struct amba_driver pl08x_amba_driver = {
	.drv.name = DRIVER_NAME,
	.id_table = pl08x_ids,
	.probe = pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       "failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);