/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"
struct virt_dma_desc {
        struct dma_async_tx_descriptor tx;
        struct dmaengine_result tx_result;
        /* protected by vc.lock */
        struct list_head node;
};
struct virt_dma_chan {
        struct dma_chan chan;
        struct tasklet_struct task;
        void (*desc_free)(struct virt_dma_desc *);

        spinlock_t lock;

        /* protected by vc.lock */
        struct list_head desc_allocated;
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;
        struct list_head desc_terminated;

        struct virt_dma_desc *cyclic;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
        return container_of(chan, struct virt_dma_chan, chan);
}
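
/*
 * Example (illustrative sketch, not part of this header): a driver
 * typically embeds struct virt_dma_chan in its own channel structure,
 * embeds struct virt_dma_desc in its own descriptor, and provides a
 * matching container_of() helper. The foo_* names below are hypothetical.
 *
 *      struct foo_desc {
 *              struct virt_dma_desc vd;
 *              dma_addr_t src, dst;
 *              size_t len;
 *      };
 *
 *      struct foo_chan {
 *              struct virt_dma_chan vc;
 *              struct foo_desc *cur_desc;
 *      };
 *
 *      static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
 *      {
 *              return container_of(to_virt_chan(chan), struct foo_chan, vc);
 *      }
 *
 * After vchan_init(&fc->vc, &dmadev), the driver is expected to set
 * fc->vc.desc_free to a routine that frees its foo_desc.
 */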
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
        struct virt_dma_desc *vd, unsigned long tx_flags)
{
        unsigned long flags;

        dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
        vd->tx.flags = tx_flags;
        vd->tx.tx_submit = vchan_tx_submit;
        vd->tx.desc_free = vchan_tx_desc_free;

        vd->tx_result.result = DMA_TRANS_NOERROR;
        vd->tx_result.residue = 0;

        spin_lock_irqsave(&vc->lock, flags);
        list_add_tail(&vd->node, &vc->desc_allocated);
        spin_unlock_irqrestore(&vc->lock, flags);

        return &vd->tx;
}
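
/*
 * Example (sketch, hypothetical foo_* driver): a prep callback such as
 * device_prep_dma_memcpy() allocates its own descriptor, fills in the
 * hardware details, and returns the result of vchan_tx_prep():
 *
 *      static struct dma_async_tx_descriptor *
 *      foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *                          dma_addr_t src, size_t len, unsigned long flags)
 *      {
 *              struct foo_chan *fc = to_foo_chan(chan);
 *              struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *              if (!d)
 *                      return NULL;
 *              d->src = src;
 *              d->dst = dst;
 *              d->len = len;
 *              return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *      }
 */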
/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
        list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
        return !list_empty(&vc->desc_issued);
}
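
/*
 * Example (sketch): a device_issue_pending() callback usually just moves
 * descriptors to the issued list and kicks the hardware if it is idle.
 * foo_start() is a hypothetical helper, sketched below next to
 * vchan_next_desc(), that programs the controller.
 *
 *      static void foo_issue_pending(struct dma_chan *chan)
 *      {
 *              struct foo_chan *fc = to_foo_chan(chan);
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&fc->vc.lock, flags);
 *              if (vchan_issue_pending(&fc->vc) && !fc->cur_desc)
 *                      foo_start(fc);
 *              spin_unlock_irqrestore(&fc->vc.lock, flags);
 *      }
 */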
/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
        dma_cookie_t cookie;

        cookie = vd->tx.cookie;
        dma_cookie_complete(&vd->tx);
        dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
                 vd, cookie);
        list_add_tail(&vd->node, &vc->desc_completed);

        tasklet_schedule(&vc->task);
}
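
/*
 * Example (sketch, hypothetical foo_* driver): an interrupt handler for a
 * non-cyclic transfer reports completion of the in-flight descriptor and
 * starts the next issued one; the vchan tasklet later runs the client
 * callback and disposes of the completed descriptor.
 *
 *      static irqreturn_t foo_irq(int irq, void *data)
 *      {
 *              struct foo_chan *fc = data;
 *
 *              spin_lock(&fc->vc.lock);
 *              if (fc->cur_desc) {
 *                      vchan_cookie_complete(&fc->cur_desc->vd);
 *                      fc->cur_desc = NULL;
 *                      foo_start(fc);
 *              }
 *              spin_unlock(&fc->vc.lock);
 *              return IRQ_HANDLED;
 *      }
 */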
/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        if (dmaengine_desc_test_reuse(&vd->tx)) {
                unsigned long flags;

                spin_lock_irqsave(&vc->lock, flags);
                list_add(&vd->node, &vc->desc_allocated);
                spin_unlock_irqrestore(&vc->lock, flags);
        } else {
                vc->desc_free(vd);
        }
}
/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        vc->cyclic = vd;
        tasklet_schedule(&vc->task);
}
/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        list_add_tail(&vd->node, &vc->desc_terminated);

        if (vc->cyclic == vd)
                vc->cyclic = NULL;
}
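
/*
 * Example (sketch): in a device_terminate_all() implementation, the
 * in-flight descriptor (already detached from the lists) is parked on the
 * terminated list rather than freed immediately, since its callback may
 * still be pending; vchan_synchronize() reaps it later. See the fuller
 * terminate_all() sketch further below.
 *
 *      if (fc->cur_desc) {
 *              vchan_terminate_vdesc(&fc->cur_desc->vd);
 *              fc->cur_desc = NULL;
 *      }
 */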
/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
        return list_first_entry_or_null(&vc->desc_issued,
                                        struct virt_dma_desc, node);
}
/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted and issued descriptors from internal lists, and
 * provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
                                             struct list_head *head)
{
        list_splice_tail_init(&vc->desc_allocated, head);
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
        list_splice_tail_init(&vc->desc_terminated, head);
}
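
/*
 * Example (sketch, hypothetical foo_* helpers): a complete
 * device_terminate_all() typically stops the hardware, collects every
 * queued descriptor under the lock, and frees them outside it:
 *
 *      static int foo_terminate_all(struct dma_chan *chan)
 *      {
 *              struct foo_chan *fc = to_foo_chan(chan);
 *              unsigned long flags;
 *              LIST_HEAD(head);
 *
 *              spin_lock_irqsave(&fc->vc.lock, flags);
 *              foo_hw_stop(fc);
 *              if (fc->cur_desc) {
 *                      vchan_terminate_vdesc(&fc->cur_desc->vd);
 *                      fc->cur_desc = NULL;
 *              }
 *              vchan_get_all_descriptors(&fc->vc, &head);
 *              spin_unlock_irqrestore(&fc->vc.lock, flags);
 *              vchan_dma_desc_free_list(&fc->vc, &head);
 *              return 0;
 *      }
 */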
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
        struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);
        vchan_get_all_descriptors(vc, &head);
        list_for_each_entry(vd, &head, node)
                dmaengine_desc_clear_reuse(&vd->tx);
        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
}
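
/*
 * Example (sketch): this helper can back the device_free_chan_resources
 * callback directly through a thin wrapper:
 *
 *      static void foo_free_chan_resources(struct dma_chan *chan)
 *      {
 *              vchan_free_chan_resources(to_virt_chan(chan));
 *      }
 */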
/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Also frees any descriptors left on the terminated list, preventing a memory
 * leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
        LIST_HEAD(head);
        unsigned long flags;

        tasklet_kill(&vc->task);

        spin_lock_irqsave(&vc->lock, flags);

        list_splice_tail_init(&vc->desc_terminated, &head);

        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
}
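
/*
 * Example (sketch): wiring this up as the device_synchronize callback:
 *
 *      static void foo_synchronize(struct dma_chan *chan)
 *      {
 *              vchan_synchronize(to_virt_chan(chan));
 *      }
 */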
#endif