/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Qualcomm Technologies HIDMA data structures
*
 * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*/

#ifndef QCOM_HIDMA_H
#define QCOM_HIDMA_H

#include <linux/kfifo.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#define HIDMA_TRE_SIZE			32 /* each TRE is 32 bytes */
#define HIDMA_TRE_CFG_IDX		0
#define HIDMA_TRE_LEN_IDX		1
#define HIDMA_TRE_SRC_LOW_IDX		2
#define HIDMA_TRE_SRC_HI_IDX		3
#define HIDMA_TRE_DEST_LOW_IDX		4
#define HIDMA_TRE_DEST_HI_IDX		5

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
	HIDMA_TRE_MEMSET = 4,
};

struct hidma_tre {
	atomic_t allocated;		/* if this channel is allocated	    */
	bool queued;			/* flag whether this is pending	    */
	u16 status;			/* status			    */
	u32 idx;			/* index of the tre		    */
	u32 dma_sig;			/* signature of the tre		    */
	const char *dev_name;		/* name of the device		    */
	void (*callback)(void *data);	/* requester callback		    */
	void *data;			/* Data associated with this channel*/
	struct hidma_lldev *lldev;	/* lldma device pointer		    */
	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
	u32 tre_index;			/* the offset where this was written*/
	u32 int_flags;			/* interrupt flags		    */
	u8 err_info;			/* error record in this transfer    */
	u8 err_code;			/* completion code		    */
};

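/*
 * Illustrative sketch only (not taken from this header): a MEMCPY TRE is
 * assembled by writing 32-bit words into hidma_tre.tre_local[] at the
 * HIDMA_TRE_*_IDX offsets defined above. The encoding of the config word
 * beyond the transaction type is hardware specific, so treat this as a
 * rough outline rather than the driver's exact code:
 *
 *	tre->tre_local[HIDMA_TRE_CFG_IDX]      = cfg;	(encodes HIDMA_TRE_MEMCPY)
 *	tre->tre_local[HIDMA_TRE_LEN_IDX]      = len;
 *	tre->tre_local[HIDMA_TRE_SRC_LOW_IDX]  = lower_32_bits(src);
 *	tre->tre_local[HIDMA_TRE_SRC_HI_IDX]   = upper_32_bits(src);
 *	tre->tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
 *	tre->tre_local[HIDMA_TRE_DEST_HI_IDX]  = upper_32_bits(dest);
 */
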
struct hidma_lldev {
	bool msi_support;		/* flag indicating MSI support	    */
	bool initialized;		/* initialized flag		    */
	u8 trch_state;			/* trch_state of the device	    */
	u8 evch_state;			/* evch_state of the device	    */
	u8 chidx;			/* channel index in the core	    */
	u32 nr_tres;			/* max number of configs	    */
	spinlock_t lock;		/* reentrancy			    */
	struct hidma_tre *trepool;	/* trepool of user configs	    */
	struct device *dev;		/* device			    */
	void __iomem *trca;		/* Transfer Channel address	    */
	void __iomem *evca;		/* Event Channel address	    */
	struct hidma_tre
		**pending_tre_list;	/* Pointers to pending TREs	    */
	atomic_t pending_tre_count;	/* Number of TREs pending	    */
	void *tre_ring;			/* TRE ring			    */
	dma_addr_t tre_dma;		/* TRE ring to be shared with HW    */
	u32 tre_ring_size;		/* Byte size of the ring	    */
	u32 tre_processed_off;		/* last processed TRE		    */
	void *evre_ring;		/* EVRE ring			    */
	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW   */
	u32 evre_ring_size;		/* Byte size of the ring	    */
	u32 evre_processed_off;		/* last processed EVRE		    */
	u32 tre_write_offset;		/* TRE write location		    */
	struct tasklet_struct task;	/* task delivering notifications    */
	DECLARE_KFIFO_PTR(handoff_fifo,
		struct hidma_tre *);	/* pending TREs FIFO		    */
};

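/*
 * Illustrative sketch only, based on the fields above: the TRE ring is a
 * flat buffer of tre_ring_size bytes shared with the hardware at tre_dma,
 * so advancing the software write pointer by one 32-byte TRE would look
 * roughly like:
 *
 *	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE) %
 *				  lldev->tre_ring_size;
 *
 * tre_processed_off and evre_processed_off track the consumed side of the
 * TRE and EVRE rings in the same byte-offset terms.
 */
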
struct hidma_desc {
	struct dma_async_tx_descriptor	desc;
	/* link list node for this channel */
	struct list_head		node;
	u32				tre_ch;
};

struct hidma_chan {
	bool				paused;
	bool				allocated;
	char				dbg_name[16];
	u32				dma_sig;
	dma_cookie_t			last_success;

	/*
	 * active descriptor on this channel
	 * It is used by the DMA complete notification to
	 * locate the descriptor that initiated the transfer.
	 */
	struct hidma_dev		*dmadev;
	struct hidma_desc		*running;

	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;

	/* Lock for this structure */
	spinlock_t			lock;
};

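/*
 * Descriptor lifecycle as suggested by the list names above (a summary,
 * not a contract): descriptors sit on 'free' until a transfer is prepared,
 * move to 'prepared', then to 'queued' and 'active' as they are issued to
 * the hardware, and finally to 'completed' where the completion path reaps
 * them and returns them to 'free'. 'running' points at the descriptor the
 * DMA-complete notification is matched against, and list movement happens
 * under 'lock'.
 */
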
struct hidma_dev {
	int				irq;
	int				chidx;
	u32				nr_descriptors;
	int				msi_virqbase;

	struct hidma_lldev		*lldev;
	void				__iomem *dev_trca;
	struct resource			*trca_resource;
	void				__iomem *dev_evca;
	struct resource			*evca_resource;

	/* used to protect the pending channel list */
	spinlock_t			lock;
	struct dma_device		ddev;

	struct dentry			*debugfs;

	/* sysfs entry for the channel id */
	struct device_attribute		*chid_attrs;

	/* Task delivering issue_pending */
	struct tasklet_struct		task;
};

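/*
 * Note: struct hidma_dev is the dmaengine-facing side of a channel (it
 * embeds the struct dma_device registered with the framework), while
 * struct hidma_lldev is the hardware-facing side that owns the TRE/EVRE
 * rings; the two are linked through the 'lldev' pointer above.
 */
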
int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
			const char *dev_name,
			void (*callback)(void *data), void *data, u32 *tre_ch);

void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
			void __iomem *trca, void __iomem *evca,
			u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
				u8 err_code);
void hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);

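/*
 * Illustrative call sequence for the lower-level hidma_ll_* interface.
 * This is an assumption based on the prototypes above, with error handling
 * and locking omitted; see the driver implementation for the authoritative
 * flow:
 *
 *	struct hidma_lldev *lldev;
 *	u32 tre_ch;
 *
 *	lldev = hidma_ll_init(dev, max_channels, trca, evca, chidx);
 *	hidma_ll_setup(lldev);
 *	hidma_ll_request(lldev, dev_id, "example", callback, data, &tre_ch);
 *	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, flags,
 *				     HIDMA_TRE_MEMCPY);
 *	hidma_ll_queue_request(lldev, tre_ch);
 *	hidma_ll_start(lldev);
 *	...
 *	hidma_ll_free(lldev, tre_ch);
 *	hidma_ll_uninit(lldev);
 */
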
#endif