/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Move the descriptor from the allocated to the submitted list. */
	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
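
/*
 * vchan_tx_submit() is not called directly by drivers: vchan_tx_prep() in
 * virt-dma.h installs it as the descriptor's tx_submit hook. A minimal
 * sketch of a driver prep operation (the foo_* names are hypothetical):
 *
 *	static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
 *		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 *		size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = container_of(chan, struct foo_chan,
 *						   vc.chan);
 *		struct foo_desc *fd = kzalloc(sizeof(*fd), GFP_NOWAIT);
 *
 *		if (!fd)
 *			return NULL;
 *		fd->dst = dst;
 *		fd->src = src;
 *		fd->len = len;
 *		// vchan_tx_prep() wires fd->vd.tx.tx_submit to vchan_tx_submit()
 *		return vchan_tx_prep(&fc->vc, &fd->vd, flags);
 *	}
 */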

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated, reusable descriptor. The only
 * other way to free such a descriptor is to clear its DMA_CTRL_REUSE flag
 * and submit the transfer one last time.
 *
 * Returns 0 upon success.
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
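
/*
 * A client-side sketch of the reuse lifecycle described above, assuming the
 * channel advertises descriptor reuse (error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
 *	dmaengine_desc_set_reuse(tx);	// sets DMA_CTRL_REUSE
 *	dmaengine_submit(tx);		// reaches vchan_tx_submit()
 *	...
 *	dmaengine_desc_free(tx);	// ends up in vchan_tx_desc_free()
 */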

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
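
/*
 * vchan_find_desc() is typically called from a driver's device_tx_status
 * hook, under vc->lock, to report residue for a descriptor that is still
 * on the issued list. A sketch under that assumption (foo_* hypothetical;
 * a real driver would compute the in-flight residue from hardware state):
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *		dma_cookie_t cookie, struct dma_tx_state *txstate)
 *	{
 *		struct foo_chan *fc = container_of(chan, struct foo_chan,
 *						   vc.chan);
 *		struct virt_dma_desc *vd;
 *		enum dma_status ret;
 *		unsigned long flags;
 *
 *		ret = dma_cookie_status(chan, cookie, txstate);
 *		if (ret == DMA_COMPLETE)
 *			return ret;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		vd = vchan_find_desc(&fc->vc, cookie);
 *		if (vd)	// not started yet: the full length remains
 *			dma_set_residue(txstate, to_foo_desc(&vd->tx)->len);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		return ret;
 *	}
 */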

/*
 * This tasklet handles the completion of DMA descriptors: it invokes each
 * descriptor's callback, then frees the descriptor or, if it is marked
 * reusable, returns it to the allocated list.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/* Report the cyclic period, if any (a zeroed cb is a no-op). */
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		dmaengine_desc_callback_invoke(&cb, NULL);
	}
}
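
/*
 * Descriptors reach this tasklet via vchan_cookie_complete() (virt-dma.h),
 * which a driver calls from its interrupt handler once the hardware is done.
 * A minimal sketch, assuming a hypothetical foo driver that tracks the
 * in-flight descriptor in fc->cur:
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *
 *		spin_lock(&fc->vc.lock);
 *		if (fc->cur) {
 *			// completes the cookie, moves the descriptor to
 *			// desc_completed and schedules vchan_complete()
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		spin_unlock(&fc->vc.lock);
 *
 *		return IRQ_HANDLED;
 *	}
 */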

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			/* Reusable descriptors go back to the allocated list. */
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
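
/*
 * A typical caller gathers everything with vchan_get_all_descriptors()
 * (virt-dma.h) under the lock and frees the list outside it. A sketch of a
 * hypothetical device_terminate_all implementation:
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = container_of(chan, struct foo_chan,
 *						   vc.chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_hw_stop(fc);	// hypothetical hardware stop
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *
 *		return 0;
 *	}
 */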

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
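
/*
 * A driver embeds struct virt_dma_chan in its own channel structure, sets
 * the mandatory desc_free callback and calls vchan_init() at probe time.
 * A hedged sketch (foo_* names hypothetical):
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		void __iomem *base;
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	// in probe, for each channel:
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 */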

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");