/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it's just set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses.  See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each class_device registered.  When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs.  When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, class_device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered.  A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client.  The release function does a kref_put on the device.
 *	-ChrisL, DanW
 */
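/*
 * Illustrative sketch only (kept as a comment, not compiled here): a minimal
 * client registers a dma_client with an event_callback and a capability
 * mask, then asks for channels.  Names prefixed with "my_" are hypothetical,
 * and the callback signature is assumed to follow dma_event_callback in
 * dmaengine.h.
 *
 *	static enum dma_state_client
 *	my_event_callback(struct dma_client *client, struct dma_chan *chan,
 *			  enum dma_state state)
 *	{
 *		if (state == DMA_RESOURCE_AVAILABLE)
 *			return DMA_ACK;	// take a reference on this channel
 *		if (state == DMA_RESOURCE_REMOVED)
 *			return DMA_ACK;	// we were using it; let it go
 *		return DMA_NAK;		// decline anything else (illustrative)
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event_callback,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */
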
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
		atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.class_dev_attrs = dma_class_attrs,
	.release	= dma_class_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
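
/*
 * Illustrative sketch only: before registering, a client describes the
 * operations it needs in its capability mask, e.g. (assuming the
 * dma_cap_zero()/dma_cap_set() helpers from dmaengine.h):
 *
 *	dma_cap_zero(client->cap_mask);
 *	dma_cap_set(DMA_MEMCPY, client->cap_mask);
 *
 * __dma_chan_satisfies_mask() then only matches channels whose device
 * advertises every capability the client asked for.
 */
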
2006-05-24 04:18:44 +04:00
/**
2007-07-09 22:56:42 +04:00
* dma_client_chan_alloc - try to allocate channels to a client
2006-05-24 04:18:44 +04:00
* @ client : & dma_client
*
* Called with dma_list_mutex held .
*/
2007-07-09 22:56:42 +04:00
static void dma_client_chan_alloc ( struct dma_client * client )
2006-05-24 04:18:44 +04:00
{
struct dma_device * device ;
struct dma_chan * chan ;
int desc ; /* allocated descriptor count */
2007-07-09 22:56:42 +04:00
enum dma_state_client ack ;
2006-05-24 04:18:44 +04:00
2007-07-09 22:56:42 +04:00
/* Find a channel */
list_for_each_entry ( device , & dma_device_list , global_node )
2006-05-24 04:18:44 +04:00
list_for_each_entry ( chan , & device - > channels , device_node ) {
2007-07-09 22:56:42 +04:00
if ( ! dma_chan_satisfies_mask ( chan , client - > cap_mask ) )
2006-05-24 04:18:44 +04:00
continue ;
desc = chan - > device - > device_alloc_chan_resources ( chan ) ;
if ( desc > = 0 ) {
2007-07-09 22:56:42 +04:00
ack = client - > event_callback ( client ,
chan ,
DMA_RESOURCE_AVAILABLE ) ;
/* we are done once this client rejects
* an available resource
*/
2007-11-15 03:59:27 +03:00
if ( ack = = DMA_ACK )
2007-07-09 22:56:42 +04:00
dma_chan_get ( chan ) ;
2007-11-15 03:59:27 +03:00
else if ( ack = = DMA_NAK )
2007-07-09 22:56:42 +04:00
return ;
2006-05-24 04:18:44 +04:00
}
}
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
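
/*
 * Illustrative sketch only: a caller holding a cookie from a submitted
 * descriptor can block on it with dma_sync_wait().  chan and tx below are
 * placeholders for a channel and a previously prepared descriptor.
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		printk(KERN_ERR "offloaded operation failed or timed out\n");
 */
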
/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;

	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* client was holding resources for this channel so
		 * free it
		 */
		if (ack == DMA_ACK)
			dma_chan_put(chan);
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK)
				dma_chan_put(chan);
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels to the
 * client that satisfy the capability mask
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_dependency_added);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->class_dev.class = &dma_devclass;
		chan->class_dev.dev = NULL;
		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
		         device->dev_id, chan->chan_id);

		rc = class_device_register(&chan->class_dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		class_device_unregister(&chan->class_dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
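
/*
 * Illustrative sketch only: a DMA driver is expected to fill in its
 * dma_device, hang its channels off device->channels, and then register.
 * The names my_softc, my_* and pdev below are hypothetical.
 *
 *	struct dma_device *device = &my_softc->common;
 *
 *	dma_cap_set(DMA_MEMCPY, device->cap_mask);
 *	device->device_alloc_chan_resources = my_alloc_chan_resources;
 *	device->device_free_chan_resources = my_free_chan_resources;
 *	device->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	device->device_is_tx_complete = my_is_tx_complete;
 *	device->device_issue_pending = my_issue_pending;
 *	device->device_dependency_added = my_dependency_added;
 *	device->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&device->channels);
 *	// ... add each struct dma_chan to device->channels ...
 *
 *	err = dma_async_device_register(device);
 */
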
/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregisters DMA devices
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		class_device_unregister(&chan->class_dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
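
/*
 * Illustrative sketch only: a typical caller submits the copy and then
 * either polls the cookie or waits synchronously.  chan, dest, src and len
 * are placeholders supplied by the caller.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0)
 *		return cookie;		// no descriptor was available
 *
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *			DMA_IN_PROGRESS)
 *		cpu_relax();
 */
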
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	INIT_LIST_HEAD(&tx->depend_node);
	INIT_LIST_HEAD(&tx->depend_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
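
/*
 * Illustrative sketch only: a driver typically embeds the generic descriptor
 * in its own software descriptor and initializes it from its 'prep' routine.
 * The my_desc structure and the my_* helpers below are hypothetical.
 *
 *	struct my_desc {
 *		struct dma_async_tx_descriptor txd;
 *		// driver-private hardware descriptor state
 *	};
 *
 *	// inside the driver's device_prep_dma_memcpy() implementation:
 *	struct my_desc *desc = my_alloc_descriptor(chan);
 *
 *	dma_async_tx_descriptor_init(&desc->txd, chan);
 *	desc->txd.tx_set_src = my_set_src;
 *	desc->txd.tx_set_dest = my_set_dest;
 *	desc->txd.tx_submit = my_tx_submit;
 *	return &desc->txd;
 */
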
static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);