/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include "hw.h"
#include "registers.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>

#define IOAT_DMA_VERSION  "4.00"
#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1
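
/* Illustrative sketch only (not in the original header): a prep routine
 * that must emit a zero-length (null/interrupt) descriptor would
 * substitute NULL_DESC_BUFFER_SIZE for the size so that ver.3.0 hardware
 * does not flag an error; 'hw' and its ctl_f.null bit are assumed from
 * the hardware descriptor layout in "hw.h":
 *
 *	hw->size = NULL_DESC_BUFFER_SIZE;
 *	hw->ctl_f.null = 1;
 */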

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: for allocating completion writeback blocks
 * @common: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
 * @enumerate_channels: hw version specific channel enumeration
 * @cleanup_tasklet: select between the v2 and v3 cleanup routines
 * @timer_fn: select between the v2 and v3 timer watchdog routines
 * @self_test: hardware version specific self test for each supported op type
 *
 * Note: the v3 cleanup routine supports raid operations
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
	struct dma_device common;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioat_chan_common *idx[4];
	struct dca_provider *dca;
	void (*intr_quirk)(struct ioatdma_device *device);
	int (*enumerate_channels)(struct ioatdma_device *device);
	void (*cleanup_tasklet)(unsigned long data);
	void (*timer_fn)(unsigned long data);
	int (*self_test)(struct ioatdma_device *device);
};

struct ioat_chan_common {
	struct dma_chan common;
	void __iomem *reg_base;
	unsigned long last_completion;
	spinlock_t cleanup_lock;
	dma_cookie_t completed_cookie;
	unsigned long state;
	#define IOAT_COMPLETION_PENDING 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *device;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_dma_chan - internal representation of a DMA channel
 */
struct ioat_dma_chan {
	struct ioat_chan_common base;

	size_t xfercap;	/* XFERCAP register value expanded out */

	spinlock_t desc_lock;
	struct list_head free_desc;
	struct list_head used_desc;

	int pending;
	u16 desccount;
	u16 active;
};

static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
	return container_of(c, struct ioat_chan_common, common);
}

static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
{
	struct ioat_chan_common *chan = to_chan_common(c);

	return container_of(chan, struct ioat_dma_chan, base);
}
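
/* Illustrative only: the two container_of() levels above let a generic
 * struct dma_chan handed out by dmaengine be unwrapped back to the
 * driver-private channel, e.g.:
 *
 *	struct ioat_dma_chan *ioat = to_ioat_chan(c);
 *	size_t max_xfer = ioat->xfercap;
 */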

/**
 * ioat_is_complete - poll the status of an ioat transaction
 * @c: channel handle
 * @cookie: transaction identifier
 * @done: if set, updated with last completed transaction
 * @used: if set, updated with last used transaction
 */
static inline enum dma_status
ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		 dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	last_used = c->cookie;
	last_complete = chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
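
/* Illustrative usage (not part of this header): a dmaengine client that
 * kept the cookie from tx_submit() can poll it like so:
 *
 *	dma_cookie_t done, used;
 *	enum dma_status ret;
 *
 *	ret = ioat_is_complete(chan, cookie, &done, &used);
 *	if (ret == DMA_SUCCESS)
 *		complete_request();
 *
 * 'chan', 'cookie', and complete_request() are hypothetical client-side
 * names, not part of this driver.
 */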

/* wrapper around hardware descriptor format + additional software fields */

/**
 * struct ioat_desc_sw - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @node: this descriptor will either be on the free list,
 *     or attached to a transaction list (tx_list)
 * @len: total transaction length (used at unmap/cleanup time)
 * @tx_list: list of descriptors chained into one transaction
 * @txd: the generic software descriptor for all engines
 * @id: identifier for debug
 */
struct ioat_desc_sw {
	struct ioat_dma_descriptor *hw;
	struct list_head node;
	size_t len;
	struct list_head tx_list;
	struct dma_async_tx_descriptor txd;
#ifdef DEBUG
	int id;
#endif
};

#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
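
/* Illustrative usage: after a prep routine fills in a software
 * descriptor, the hardware fields can be traced; dev_dbg() means the
 * output only appears on DEBUG/dynamic-debug builds:
 *
 *	dump_desc_dbg(ioat, desc);
 *
 * where 'ioat' is the struct ioat_dma_chan that owns 'desc'.
 */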

static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
{
#ifdef CONFIG_NET_DMA
	sysctl_tcp_dma_copybreak = copybreak;
#endif
}

static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
	return device->idx[index];
}

static inline u64 ioat_chansts(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}

static inline void ioat_start(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;

	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
{
	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioat_chan_common *chan)
{
	u8 ver = chan->device->version;

	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}
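
/* Illustrative only: the predicates above operate on a raw CHANSTS
 * snapshot, so a typical error check might look like:
 *
 *	u64 status = ioat_chansts(chan);
 *
 *	if (is_ioat_halted(status))
 *		dev_err(to_dev(chan), "halted at %#llx, chanerr: %#x\n",
 *			(unsigned long long) ioat_chansts_to_addr(status),
 *			ioat_chanerr(chan));
 */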

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
			      int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}

int __devinit ioat_probe(struct ioatdma_device *device);
int __devinit ioat_register(struct ioatdma_device *device);
int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat_dma_self_test(struct ioatdma_device *device);
void __devexit ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
					      void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
extern struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
#endif /* IOATDMA_H */