/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "5.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
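
/*
 * Each channel's registers occupy their own 0x80-byte window above the
 * device's common register block, so chan_num() can recover a channel's
 * index from the offset of its reg_base.
 */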

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
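
/*
 * Worked example of the bias encoding above: a 3-source XOR is written
 * to the descriptor as src_cnt_to_hw(3) == 1, and decoding a raw
 * hardware count of 1 with src_cnt_to_sw(1) recovers the original 3
 * sources. The 16-source variants apply the same idea with a bias of 9.
 */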

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1
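
/*
 * A zero-length (null) descriptor is therefore issued with this 1-byte
 * size instead of 0, so that affected channels do not flag an error.
 */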

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 * @chancnt: number of channels
 * @msixtba0: MSI-X table address shadow (CB3.3 channel reset errata workaround)
 * @msixdata0: MSI-X data shadow (CB3.3 channel reset errata workaround)
 * @msixpba: MSI-X pending bit array shadow (CB3.3 channel reset errata workaround)
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
	int chancnt;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
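
/*
 * Sizing sketch (assuming IOAT_DESC_SZ from hw.h is 64 bytes): each
 * 512K chunk then holds 512K / 64 = 8192 descriptors, so a maximally
 * sized ring of IOAT_MAX_DESCS (65536) descriptors spans 8 chunks.
 */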

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};
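
/*
 * Ring bookkeeping invariant implied by the field documentation above:
 * tail <= issued <= head when read modulo the ring size; descriptor
 * preparation advances head under prep_lock, issue_pending advances
 * issued, and completion processing advances tail under cleanup_lock.
 */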

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long)tx->phys,
		(unsigned long long)hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
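
/*
 * Note: dump_desc_dbg() is a GCC statement expression that evaluates to
 * 0, so it can be used where an expression is expected; it is also safe
 * to call on a NULL descriptor.
 */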

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}
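
/*
 * Reading IOAT_CHANCMD_RESET back as still set means the channel has
 * not yet finished the reset started by ioat_reset() above (the bit is
 * presumed to self-clear once the reset completes).
 */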

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}
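
/*
 * CIRC_CNT(head, tail, size) from <linux/circ_buf.h> computes
 * (head - tail) & (size - 1), which stays correct across u16 index
 * wraparound because the ring size is a power of two; e.g. head == 1,
 * tail == 65534 and size == 8 still yields 3 in-flight descriptors.
 */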

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
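
/*
 * ioat_xferlen_to_descs() is a ceiling division by the per-descriptor
 * transfer cap: the shift counts whole descriptors and the !!() term
 * adds one for any remainder. E.g. with xfercap_log == 20 (a 1 MB cap,
 * value assumed for illustration), a 3 MB + 1 byte transfer needs 4
 * descriptors.
 */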

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
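
/*
 * The 64-bit chain address is programmed as two 32-bit writes (low word
 * first) because the CHAINADDR register is exposed as a LOW/HIGH pair.
 */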

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);

#endif /* IOATDMA_H */