spi: Check if transfer is mapped before calling DMA sync APIs
The recent update that removed the orig_nents checks revealed that not
all DMA sync backends can cope with an unallocated SG list when
orig_nents == 0 (commit 861370f49ce4 ("iommu/dma: force bouncing if the
size is not cacheline-aligned"), for example, makes that happen in the
IOMMU case). This means we have to check whether the buffers are DMA
mapped before trying to sync them. Re-introduce that check in the form
of calling ->can_dma(), the same way it's done in the DMA mapping loop
over the SPI transfers.

Reported-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
Reported-by: Neil Armstrong <neil.armstrong@linaro.org>
Closes: https://lore.kernel.org/r/8ae675b5-fcf9-4c9b-b06a-4462f70e1322@linaro.org
Closes: https://lore.kernel.org/all/d3679496-2e4e-4a7c-97ed-f193bd53af1d@notapiano
Fixes: 8cc3bad9d9d6 ("spi: Remove unneded check for orig_nents")
Suggested-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
Tested-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Link: https://msgid.link/r/20240522171018.3362521-3-andriy.shevchenko@linux.intel.com
Signed-off-by: Mark Brown <broonie@kernel.org>
commit da560097c0
parent 9f788ba457
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1311,7 +1311,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 	return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
 				    struct spi_transfer *xfer)
 {
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1320,11 +1320,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
 	if (!ctlr->cur_msg_mapped)
 		return;
 
+	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+		return;
+
 	dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
 				 struct spi_transfer *xfer)
 {
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1333,6 +1336,9 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
 	if (!ctlr->cur_msg_mapped)
 		return;
 
+	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+		return;
+
 	dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 	dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
@@ -1350,11 +1356,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+				    struct spi_message *msg,
 				    struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+				 struct spi_message *msg,
 				 struct spi_transfer *xfer)
 {
 }
@@ -1626,10 +1634,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 			reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-			spi_dma_sync_for_device(ctlr, xfer);
+			spi_dma_sync_for_device(ctlr, msg, xfer);
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
-				spi_dma_sync_for_cpu(ctlr, xfer);
+				spi_dma_sync_for_cpu(ctlr, msg, xfer);
 
 				if (ctlr->cur_msg_mapped &&
 				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
@@ -1654,7 +1662,7 @@ fallback_pio:
 				msg->status = ret;
 			}
 
-			spi_dma_sync_for_cpu(ctlr, xfer);
+			spi_dma_sync_for_cpu(ctlr, msg, xfer);
 		} else {
 			if (xfer->len)
 				dev_err(&msg->spi->dev,