spi: rspi: Fix leaking of unused DMA descriptors

If dmaengine_prep_slave_sg() or dmaengine_submit() fails, we may leak
unused DMA descriptors.

As per Documentation/dmaengine.txt, once a DMA descriptor has been
obtained, it must be submitted. Hence:
  - First prepare and submit all DMA descriptors,
  - Prepare the SPI controller for DMA,
  - Start DMA by calling dma_async_issue_pending(),
  - Make sure to call dmaengine_terminate_all() on the channels whose
    descriptors haven't completed (e.g. on submission failure or DMA
    timeout); a generic sketch of this lifecycle follows below.
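
For context, here is a minimal sketch of the dmaengine descriptor lifecycle
described above, for a single slave channel. It is not code from spi-rspi:
the helper example_dma_xfer(), the completion-based wait, and the
DMA_MEM_TO_DEV direction are illustrative assumptions; only the dmaengine
calls themselves are the standard API.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

/* Hypothetical callback: just wake up the waiter. */
static void example_dma_done(void *arg)
{
	complete(arg);
}

/* Hypothetical helper: prepare -> submit -> issue -> terminate on error. */
static int example_dma_xfer(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents, struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* 1. Prepare the descriptor first, as this step may fail */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EAGAIN;	/* caller may fall back to PIO */

	desc->callback = example_dma_done;
	desc->callback_param = done;

	/* 2. A descriptor, once obtained, must be submitted */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		/*
		 * With several channels, any channel that was already
		 * submitted to would need dmaengine_terminate_all() here.
		 */
		return cookie;
	}

	/* 3. Start the queued transfer */
	dma_async_issue_pending(chan);

	/* 4. On timeout, reclaim whatever is still pending on the channel */
	if (!wait_for_completion_timeout(done, HZ)) {
		dmaengine_terminate_all(chan);
		return -ETIMEDOUT;
	}

	return 0;
}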

Reported-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Mark Brown <broonie@linaro.org>
commit 3819bc8752
parent 7d1311b93e
Author:    Geert Uytterhoeven <geert+renesas@glider.be>
Date:      2014-08-06 14:58:58 +02:00
Committer: Mark Brown <broonie@linaro.org>

@@ -472,25 +472,52 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 	dma_cookie_t cookie;
 	int ret;
 
-	if (tx) {
-		desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
-					tx->sgl, tx->nents, DMA_TO_DEVICE,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_tx)
-			goto no_dma;
-
-		irq_mask |= SPCR_SPTIE;
-	}
+	/* First prepare and submit the DMA request(s), as this may fail */
 	if (rx) {
 		desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
 					rx->sgl, rx->nents, DMA_FROM_DEVICE,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_rx)
-			goto no_dma;
+		if (!desc_rx) {
+			ret = -EAGAIN;
+			goto no_dma_rx;
+		}
+
+		desc_rx->callback = rspi_dma_complete;
+		desc_rx->callback_param = rspi;
+		cookie = dmaengine_submit(desc_rx);
+		if (dma_submit_error(cookie)) {
+			ret = cookie;
+			goto no_dma_rx;
+		}
 
 		irq_mask |= SPCR_SPRIE;
 	}
 
+	if (tx) {
+		desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+					tx->sgl, tx->nents, DMA_TO_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc_tx) {
+			ret = -EAGAIN;
+			goto no_dma_tx;
+		}
+
+		if (rx) {
+			/* No callback */
+			desc_tx->callback = NULL;
+		} else {
+			desc_tx->callback = rspi_dma_complete;
+			desc_tx->callback_param = rspi;
+		}
+		cookie = dmaengine_submit(desc_tx);
+		if (dma_submit_error(cookie)) {
+			ret = cookie;
+			goto no_dma_tx;
+		}
+
+		irq_mask |= SPCR_SPTIE;
+	}
+
 	/*
 	 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
 	 * called. So, this driver disables the IRQ while DMA transfer.
@@ -503,34 +530,24 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 	rspi_enable_irq(rspi, irq_mask);
 	rspi->dma_callbacked = 0;
 
-	if (rx) {
-		desc_rx->callback = rspi_dma_complete;
-		desc_rx->callback_param = rspi;
-		cookie = dmaengine_submit(desc_rx);
-		if (dma_submit_error(cookie))
-			return cookie;
+	/* Now start DMA */
+	if (rx)
 		dma_async_issue_pending(rspi->master->dma_rx);
-	}
-	if (tx) {
-		if (rx) {
-			/* No callback */
-			desc_tx->callback = NULL;
-		} else {
-			desc_tx->callback = rspi_dma_complete;
-			desc_tx->callback_param = rspi;
-		}
-		cookie = dmaengine_submit(desc_tx);
-		if (dma_submit_error(cookie))
-			return cookie;
+	if (tx)
 		dma_async_issue_pending(rspi->master->dma_tx);
-	}
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
 	if (ret > 0 && rspi->dma_callbacked)
 		ret = 0;
-	else if (!ret)
+	else if (!ret) {
 		dev_err(&rspi->master->dev, "DMA timeout\n");
 		ret = -ETIMEDOUT;
+		if (tx)
+			dmaengine_terminate_all(rspi->master->dma_tx);
+		if (rx)
+			dmaengine_terminate_all(rspi->master->dma_rx);
+	}
 
 	rspi_disable_irq(rspi, irq_mask);
@@ -541,11 +558,16 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
 	return ret;
 
-no_dma:
-	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-		     dev_driver_string(&rspi->master->dev),
-		     dev_name(&rspi->master->dev));
-	return -EAGAIN;
+no_dma_tx:
+	if (rx)
+		dmaengine_terminate_all(rspi->master->dma_rx);
+no_dma_rx:
+	if (ret == -EAGAIN) {
+		pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+			     dev_driver_string(&rspi->master->dev),
+			     dev_name(&rspi->master->dev));
+	}
+	return ret;
 }
 
 static void rspi_receive_init(const struct rspi_data *rspi)