Merge branch 'topic/async' into for-linus
This commit is contained in:
commit
d3f1e93ce8
@ -117,7 +117,7 @@ The slave DMA usage consists of following steps:
|
||||
transaction.
|
||||
|
||||
For cyclic DMA, a callback function may wish to terminate the
|
||||
DMA via dmaengine_terminate_all().
|
||||
DMA via dmaengine_terminate_async().
|
||||
|
||||
Therefore, it is important that DMA engine drivers drop any
|
||||
locks before calling the callback function which may cause a
|
||||
@ -155,12 +155,29 @@ The slave DMA usage consists of following steps:
|
||||
|
||||
Further APIs:
|
||||
|
||||
1. int dmaengine_terminate_all(struct dma_chan *chan)
|
||||
1. int dmaengine_terminate_sync(struct dma_chan *chan)
|
||||
int dmaengine_terminate_async(struct dma_chan *chan)
|
||||
int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */
|
||||
|
||||
This causes all activity for the DMA channel to be stopped, and may
|
||||
discard data in the DMA FIFO which hasn't been fully transferred.
|
||||
No callback functions will be called for any incomplete transfers.
|
||||
|
||||
Two variants of this function are available.
|
||||
|
||||
dmaengine_terminate_async() might not wait until the DMA has been fully
|
||||
stopped or until any running complete callbacks have finished. But it is
|
||||
possible to call dmaengine_terminate_async() from atomic context or from
|
||||
within a complete callback. dmaengine_synchronize() must be called before it
|
||||
is safe to free the memory accessed by the DMA transfer or free resources
|
||||
accessed from within the complete callback.
|
||||
|
||||
dmaengine_terminate_sync() will wait for the transfer and any running
|
||||
complete callbacks to finish before it returns. But the function must not be
|
||||
called from atomic context or from within a complete callback.
|
||||
|
||||
dmaengine_terminate_all() is deprecated and should not be used in new code.
|
||||
|
||||
2. int dmaengine_pause(struct dma_chan *chan)
|
||||
|
||||
This pauses activity on the DMA channel without data loss.
|
||||
@ -186,3 +203,20 @@ Further APIs:
|
||||
a running DMA channel. It is recommended that DMA engine users
|
||||
pause or stop (via dmaengine_terminate_all()) the channel before
|
||||
using this API.
|
||||
|
||||
5. void dmaengine_synchronize(struct dma_chan *chan)
|
||||
|
||||
Synchronize the termination of the DMA channel to the current context.
|
||||
|
||||
This function should be used after dmaengine_terminate_async() to synchronize
|
||||
the termination of the DMA channel to the current context. The function will
|
||||
wait for the transfer and any running complete callbacks to finish before it
|
||||
returns.
|
||||
|
||||
If dmaengine_terminate_async() is used to stop the DMA channel this function
|
||||
must be called before it is safe to free memory accessed by previously
|
||||
submitted descriptors or to free any resources accessed within the complete
|
||||
callback of previously submitted descriptors.
|
||||
|
||||
The behavior of this function is undefined if dma_async_issue_pending() has
|
||||
been called between dmaengine_terminate_async() and this function.
|
||||
|
@ -327,8 +327,24 @@ supported.
|
||||
|
||||
* device_terminate_all
|
||||
- Aborts all the pending and ongoing transfers on the channel
|
||||
- This command should operate synchronously on the channel,
|
||||
terminating all activity on the channel right away
|
||||
- For aborted transfers the complete callback should not be called
|
||||
- Can be called from atomic context or from within a complete
|
||||
callback of a descriptor. Must not sleep. Drivers must be able
|
||||
to handle this correctly.
|
||||
- Termination may be asynchronous. The driver does not have to
|
||||
wait until the currently active transfer has completely stopped.
|
||||
See device_synchronize.
|
||||
|
||||
* device_synchronize
|
||||
- Must synchronize the termination of a channel to the current
|
||||
context.
|
||||
- Must make sure that memory for previously submitted
|
||||
descriptors is no longer accessed by the DMA controller.
|
||||
- Must make sure that all complete callbacks for previously
|
||||
submitted descriptors have finished running and none are
|
||||
scheduled to run.
|
||||
- May sleep.
|
||||
|
||||
|
||||
Misc notes (stuff that should be documented, but don't really know
|
||||
where to put them)
|
||||
|
@ -307,6 +307,13 @@ static int axi_dmac_terminate_all(struct dma_chan *c)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void axi_dmac_synchronize(struct dma_chan *c)
|
||||
{
|
||||
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
|
||||
|
||||
vchan_synchronize(&chan->vchan);
|
||||
}
|
||||
|
||||
static void axi_dmac_issue_pending(struct dma_chan *c)
|
||||
{
|
||||
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
|
||||
@ -613,6 +620,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
|
||||
dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
|
||||
dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
|
||||
dma_dev->device_terminate_all = axi_dmac_terminate_all;
|
||||
dma_dev->device_synchronize = axi_dmac_synchronize;
|
||||
dma_dev->dev = &pdev->dev;
|
||||
dma_dev->chancnt = 1;
|
||||
dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
|
||||
|
@ -266,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan)
|
||||
module_put(dma_chan_to_owner(chan));
|
||||
|
||||
/* This channel is not in use anymore, free it */
|
||||
if (!chan->client_count && chan->device->device_free_chan_resources)
|
||||
if (!chan->client_count && chan->device->device_free_chan_resources) {
|
||||
/* Make sure all operations have completed */
|
||||
dmaengine_synchronize(chan);
|
||||
chan->device->device_free_chan_resources(chan);
|
||||
}
|
||||
|
||||
/* If the channel is used via a DMA request router, free the mapping */
|
||||
if (chan->router && chan->router->route_free) {
|
||||
|
@ -163,4 +163,17 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
|
||||
vchan_dma_desc_free_list(vc, &head);
|
||||
}
|
||||
|
||||
/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	/*
	 * Descriptor completion callbacks run from vc->task; tasklet_kill()
	 * waits for a currently running instance to finish and prevents it
	 * from being scheduled again.
	 */
	tasklet_kill(&vc->task);
}
|
||||
|
||||
#endif
|
||||
|
@ -681,6 +681,8 @@ struct dma_filter {
|
||||
* paused. Returns 0 or an error code
|
||||
* @device_terminate_all: Aborts all transfers on a channel. Returns 0
|
||||
* or an error code
|
||||
* @device_synchronize: Synchronizes the termination of a transfer to the
|
||||
* current context.
|
||||
* @device_tx_status: poll for transaction completion, the optional
|
||||
* txstate parameter can be supplied with a pointer to get a
|
||||
* struct with auxiliary transfer status information, otherwise the call
|
||||
@ -767,6 +769,7 @@ struct dma_device {
|
||||
int (*device_pause)(struct dma_chan *chan);
|
||||
int (*device_resume)(struct dma_chan *chan);
|
||||
int (*device_terminate_all)(struct dma_chan *chan);
|
||||
void (*device_synchronize)(struct dma_chan *chan);
|
||||
|
||||
enum dma_status (*device_tx_status)(struct dma_chan *chan,
|
||||
dma_cookie_t cookie,
|
||||
@ -858,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
|
||||
src_sg, src_nents, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* dmaengine_terminate_all() - Terminate all active DMA transfers
|
||||
* @chan: The channel for which to terminate the transfers
|
||||
*
|
||||
* This function is DEPRECATED use either dmaengine_terminate_sync() or
|
||||
* dmaengine_terminate_async() instead.
|
||||
*/
|
||||
static inline int dmaengine_terminate_all(struct dma_chan *chan)
|
||||
{
|
||||
if (chan->device->device_terminate_all)
|
||||
@ -866,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed
 * though that the transfer for the active descriptor has stopped when the
 * function returns. Furthermore it is possible the complete callback of a
 * submitted transfer is still running when this function returns.
 *
 * dmaengine_synchronize() needs to be called before it is safe to free
 * any memory that is accessed by previously submitted descriptors or before
 * freeing any resources accessed from within the completion callback of any
 * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If none of the two conditions above apply consider using
 * dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -EINVAL;
}
|
||||
|
||||
/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending() has
 * been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the same
 * channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
	/* The driver's device_synchronize callback may block waiting for
	 * transfers and callbacks to finish, so callers must be able to sleep. */
	might_sleep();

	if (chan->device->device_synchronize)
		chan->device->device_synchronize(chan);
}
|
||||
|
||||
/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers and wait
 * @chan: The channel for which to terminate the transfers
 *
 * Stops all active and pending descriptors previously submitted to @chan.
 * Unlike dmaengine_terminate_async(), this only returns once the DMA
 * transfer has actually stopped and every running complete callback has
 * finished.
 *
 * Must be called from non-atomic context and never from within a complete
 * callback of a descriptor submitted on the same channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
	int ret = dmaengine_terminate_async(chan);

	if (ret)
		return ret;

	dmaengine_synchronize(chan);

	return 0;
}
|
||||
|
||||
static inline int dmaengine_pause(struct dma_chan *chan)
|
||||
{
|
||||
if (chan->device->device_pause)
|
||||
|
@ -202,13 +202,13 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
|
||||
if (runtime->info & SNDRV_PCM_INFO_PAUSE)
|
||||
dmaengine_pause(prtd->dma_chan);
|
||||
else
|
||||
dmaengine_terminate_all(prtd->dma_chan);
|
||||
dmaengine_terminate_async(prtd->dma_chan);
|
||||
break;
|
||||
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
|
||||
dmaengine_pause(prtd->dma_chan);
|
||||
break;
|
||||
case SNDRV_PCM_TRIGGER_STOP:
|
||||
dmaengine_terminate_all(prtd->dma_chan);
|
||||
dmaengine_terminate_async(prtd->dma_chan);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
@ -346,6 +346,7 @@ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
|
||||
{
|
||||
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
|
||||
|
||||
dmaengine_synchronize(prtd->dma_chan);
|
||||
kfree(prtd);
|
||||
|
||||
return 0;
|
||||
@ -362,9 +363,11 @@ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
|
||||
{
|
||||
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
|
||||
|
||||
dmaengine_synchronize(prtd->dma_chan);
|
||||
dma_release_channel(prtd->dma_chan);
|
||||
kfree(prtd);
|
||||
|
||||
return snd_dmaengine_pcm_close(substream);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user