dmaengine: qcom_bam_dma: Add descriptor flags
This patch adds support for end of transaction (EOT) and notify when done
(NWD) hardware descriptor flags.

The EOT flag requests that the peripheral assert an end of transaction
interrupt when that descriptor is complete. It also results in a special
signaling protocol being used between the attached peripheral and the core
using the DMA controller. Clients will specify DMA_PREP_INTERRUPT to enable
this flag.

The NWD flag requests that the data be fully processed by the peripheral
before moving on to the next descriptor. Clients will specify DMA_PREP_FENCE
to enable this flag.

Signed-off-by: Andy Gross <agross@codeaurora.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
commit 89751d0a2a
parent 43452fadd6
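For context, a minimal client-side sketch of how a dmaengine consumer would request these behaviors from this driver. This is hypothetical illustration only, not part of the patch; the function name, channel, and scatterlist setup are assumed to exist in the caller.

/*
 * Hypothetical dmaengine client sketch (not part of this patch):
 * DMA_PREP_INTERRUPT maps to DESC_FLAG_EOT and DMA_PREP_FENCE maps to
 * DESC_FLAG_NWD in the BAM driver after this change.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_issue_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;

	/* Prepare a slave transfer with EOT and NWD behavior requested */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM, flags);
	if (!txd)
		return -EINVAL;

	/* Queue the descriptor and kick the channel */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}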
@@ -61,12 +61,17 @@ struct bam_desc_hw {
 #define DESC_FLAG_INT BIT(15)
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
+#define DESC_FLAG_NWD BIT(12)
 
 struct bam_async_desc {
 	struct virt_dma_desc vd;
 
 	u32 num_desc;
 	u32 xfer_len;
+
+	/* transaction flags, EOT|EOB|NWD */
+	u16 flags;
+
 	struct bam_desc_hw *curr_desc;
 
 	enum dma_transfer_direction dir;
@@ -490,6 +495,14 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
 	if (!async_desc)
 		goto err_out;
 
+	if (flags & DMA_PREP_FENCE)
+		async_desc->flags |= DESC_FLAG_NWD;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		async_desc->flags |= DESC_FLAG_EOT;
+	else
+		async_desc->flags |= DESC_FLAG_INT;
+
 	async_desc->num_desc = num_alloc;
 	async_desc->curr_desc = async_desc->desc;
 	async_desc->dir = direction;
@@ -793,8 +806,11 @@ static void bam_start_dma(struct bam_chan *bchan)
 	else
 		async_desc->xfer_len = async_desc->num_desc;
 
-	/* set INT on last descriptor */
-	desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+	/* set any special flags on the last descriptor */
+	if (async_desc->num_desc == async_desc->xfer_len)
+		desc[async_desc->xfer_len - 1].flags = async_desc->flags;
+	else
+		desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
 
 	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
 		u32 partial = MAX_DESCRIPTORS - bchan->tail;