/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
2011-12-26 13:49:52 +04:00
# include <linux/kernel.h>
2010-05-24 07:28:19 +04:00
# include <linux/io.h>
# include <linux/init.h>
# include <linux/slab.h>
# include <linux/module.h>
2011-12-26 13:49:52 +04:00
# include <linux/string.h>
# include <linux/delay.h>
# include <linux/interrupt.h>
# include <linux/dma-mapping.h>
2010-05-24 07:28:19 +04:00
# include <linux/dmaengine.h>
# include <linux/amba/bus.h>
# include <linux/amba/pl330.h>
2011-09-02 04:44:30 +04:00
# include <linux/scatterlist.h>
2011-10-24 13:43:31 +04:00
# include <linux/of.h>
2013-02-14 07:40:06 +04:00
# include <linux/of_dma.h>
2013-03-04 13:06:27 +04:00
# include <linux/err.h>
2010-05-24 07:28:19 +04:00
2012-03-07 02:34:26 +04:00
# include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};
/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
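
/*
 * A worked example of the burst math (illustrative values, not from
 * the TRM): a ccr with burst-size field 2 gives BRST_SIZE(ccr) =
 * 1 << 2 = 4 bytes per beat, and a burst-length field of 7 gives
 * BRST_LEN(ccr) = 7 + 1 = 8 beats, i.e. 32 bytes per burst. Then
 * BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 8 = 128 bursts, and
 * BURST_TO_BYTE(128, ccr) = 128 * 4 * 8 = 4096 bytes back again.
 */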
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};
/**
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};
struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}
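
/*
 * Illustration of the resulting encoding (derived from the bit
 * operations above): a DMALPENDB closing loop counter 1 with an
 * 8-byte backward jump sets buf[0] = 0x28 | (1 << 2) | (1 << 4) |
 * (1 << 1) | (1 << 0) = 0x3f and buf[1] = 0x08, so the bytes
 * 0x3f 0x08 land in the microcode stream.
 */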
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
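
/*
 * Taken together, the _emit_*() helpers compose complete microcode
 * programs. A minimal, hypothetical memcpy-style program for a single
 * load/store pair would be built as:
 *
 *	off  = _emit_MOV(0, buf, CCR, ccr);	   // DMAMOV CCR, ccr
 *	off += _emit_MOV(0, &buf[off], SAR, src);  // DMAMOV SAR, src
 *	off += _emit_MOV(0, &buf[off], DAR, dst);  // DMAMOV DAR, dst
 *	off += _emit_LD(0, &buf[off], ALWAYS);	   // DMALD
 *	off += _emit_ST(0, &buf[off], ALWAYS);	   // DMAST
 *	off += _emit_SEV(0, &buf[off], ev);	   // DMASEV ev
 *	off += _emit_END(0, &buf[off]);		   // DMAEND
 *
 * which is the shape _setup_req() below generates, with the load/store
 * pair expanded into loops by _loop() and _bursts().
 */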
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);

	thrd->req_running = -1;
}
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		/* fall through */

	case PL330_STATE_FAULTING:
		_stop(thrd);
		/* fall through */

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		/* fall through */

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
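
/*
 * A worked example with assumed figures: for *bursts = 70000 the first
 * branch picks lcnt0 = lcnt1 = 256 and cyc = 70000 / 256 / 256 = 1, so
 * this pass emits a 256 * 256 nested loop and returns with *bursts =
 * 65536; _setup_loops() below then calls again for the remaining 4464
 * bursts (lcnt1 = 256, lcnt0 = 17) and once more for the final 112.
 */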
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	/* Error if xfer length is not aligned at burst size */
	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
		return -EINVAL;

	pxs->x = x;
	off += _setup_xfer(dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
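
/*
 * For example (hypothetical request config): src_inc = dst_inc = 1,
 * brst_size = 2 and brst_len = 4 yields
 * ccr = CC_SRCINC | CC_DSTINC | (3 << CC_SRCBRSTLEN_SHFT) |
 * (3 << CC_DSTBRSTLEN_SHFT) | (2 << CC_SRCBRSTSIZE_SHFT) |
 * (2 << CC_DSTBRSTSIZE_SHFT), i.e. incrementing 4-beat bursts of
 * 4 bytes each on both sides, with all other fields left at 0.
 */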
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	regs = thrd->dmac->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz\n",
				__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].r = r;
	_setup_req(0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
static void dma_pl330_rqcb(struct pl330_req *req, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = container_of(req, struct dma_pl330_desc, req);
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].r, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct pl330_req *rqdone, *tmp;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			rqdone = thrd->req[active].r;
			thrd->req[active].r = NULL;

			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op)
{
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
		/* fall through */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	unsigned long flags;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}
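
/*
 * Example decode (assumed raw field values): a CRD data-width field of
 * 2 gives data_bus_width = 8 * (1 << 2) = 32 bits, and a CR0 channel
 * field of 7 gives num_chan = 7 + 1 = 8 -- the common 8-channel,
 * 32-bit PL330 configuration.
 */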
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
static int pl330_add(struct pl330_dmac *pl330)
{
	void __iomem *regs;
	int i, ret;

	regs = pl330->base;

	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_coherent(pl330->ddma.dev,
		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
		pl330->mcode_bus);
}
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->thread, &desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->thread, PL330_OP_START);

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
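
/*
 * A non-DT client would typically use this filter through the generic
 * dmaengine API, e.g. (sketch, peri_id assumed):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter,
 *				   (void *)(unsigned long)peri_id);
 *
 * where peri_id selects the peripheral request interface.
 */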
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long)pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
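
/*
 * dmaengine control hook. DMA_TERMINATE_ALL flushes the channel thread
 * and returns every descriptor (submitted, queued or completed) to the
 * DMAC pool; DMA_SLAVE_CONFIG caches the peripheral FIFO address, burst
 * size and burst length for the configured direction.
 */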
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->thread, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->work_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->completed_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
		list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
		list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->ddma.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}
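
/*
 * A minimal client-side sketch of the DMA_SLAVE_CONFIG path above
 * (hypothetical peripheral FIFO at 0x12340020, 4-byte wide):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x12340020,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */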
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->req.x = &desc->px;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pl330->desc_pool);
	}

	spin_unlock_irqrestore(&pl330->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	if (!list_empty(&pl330->desc_pool)) {
		desc = list_entry(pl330->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pl330->pool_lock, flags);

	return desc;
}
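
/*
 * Fetch a descriptor for 'pch': take one from the shared DMAC pool,
 * growing the pool by a single GFP_ATOMIC allocation if it has run dry,
 * then bind it to this channel and (re)initialise its txd.
 */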
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pl330);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pl330, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pl330);
		if (!desc) {
			dev_err(pch->dmac->ddma.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
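
/*
 * Worked example (hypothetical numbers): a 64-bit data bus with a
 * 16-deep data buffer and brst_size = 2 (4-byte beats) gives
 * 8 * 16 >> 2 = 32, capped to the architectural maximum of 16.
 * For len = 4096 bytes, 16 << 2 = 64 divides len, so burst_len = 16;
 * for len = 96 bytes the loop walks down to 12 (12 << 2 = 48 divides 96).
 */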
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;
	int burst_len;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
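
/*
 * Build a cyclic (ring) transfer of len/period_len equal periods.
 * One descriptor is prepared per period and chained onto the first;
 * on an allocation failure midway, everything already chained is
 * pushed back to the DMAC pool.
 */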
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pl330->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->req.rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}
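
/*
 * Memcpy prep: pick the widest burst size (brst_size) that both the bus
 * width and the length alignment allow, then the longest burst length.
 * E.g. (hypothetical) a 64-bit bus copying 4100 bytes: burst starts at
 * 8, 4100 % 8 != 0, halve to 4, 4100 % 4 == 0, so brst_size = 2.
 */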
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
static void __pl330_giveback_desc(struct pl330_dmac *pl330,
				  struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pl330->desc_pool);
	}

	list_move_tail(&first->node, &pl330->desc_pool);

	spin_unlock_irqrestore(&pl330->pool_lock, flags);
}
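
/*
 * Slave scatter-gather prep: one descriptor per sg entry, each moving
 * sg_dma_len() bytes between the entry and the fixed peripheral FIFO
 * address cached by DMA_SLAVE_CONFIG. The last descriptor of the chain
 * is returned, as pl330_tx_submit() expects.
 */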
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		desc->req.rqtype = direction;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
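
/*
 * AMBA probe: map the controller, hook up its IRQ lines, read the
 * hardware configuration via pl330_add(), then allocate one
 * dma_pl330_chan per channel/peripheral and register the dmaengine
 * device (plus the DT xlate hook when probed from the device tree).
 */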
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pl330->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
	else
		num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pcfg->num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	pd->device_slave_caps = pl330_dma_device_slave_caps;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	adev->dev.dma_parms = &pl330->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		pcfg->num_peri, pcfg->num_events);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}
probe_err2:
	pl330_del(pl330);

	return ret;
}
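
/*
 * Tear-down mirrors probe: drop the DT registration, unregister the
 * dmaengine device, terminate and free every channel, then remove the
 * core pl330 state with pl330_del().
 */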
static int pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pl330_del(pl330);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");