// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY          0
#define DW_SPI_RX_BURST_LEVEL   16
#define DW_SPI_TX_BUSY          1
#define DW_SPI_TX_BURST_LEVEL   16
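
/*
 * DMA channel filter used with dma_request_channel(): accept only a channel
 * provided by the DMA controller device referenced by the dw_dma_slave
 * parameter and stash that slave data in chan->private for the DMA driver.
 */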
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma_slave *s = param;

        if (s->dma_dev != chan->device->dev)
                return false;

        chan->private = s;

        return true;
}
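
/*
 * Derive the Rx and Tx burst lengths from the DMA channel capabilities
 * (falling back to the driver defaults) and program the controller's DMA
 * request thresholds accordingly.
 */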
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
        struct dma_slave_caps caps;
        u32 max_burst, def_burst;
        int ret;

        def_burst = dws->fifo_len / 2;

        ret = dma_get_slave_caps(dws->rxchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = DW_SPI_RX_BURST_LEVEL;

        dws->rxburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

        ret = dma_get_slave_caps(dws->txchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = DW_SPI_TX_BURST_LEVEL;

        /*
         * Having the Rx DMA channel serviced with higher priority than the Tx
         * DMA channel might not be enough to provide a well balanced DMA-based
         * SPI transfer interface. There might still be moments when the Tx DMA
         * channel is occasionally handled faster than the Rx DMA channel.
         * That in turn will eventually cause the SPI Rx FIFO overflow if the
         * SPI bus speed is high enough to fill the SPI Rx FIFO before it's
         * cleared by the Rx DMA channel. In order to fix the problem the Tx
         * DMA activity is intentionally slowed down by limiting the SPI Tx
         * FIFO depth to a value twice the Tx burst length.
         */
        dws->txburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}
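
/*
 * Pick the SG-burst limit as the least capability of the Tx and Rx DMA
 * channels. Zero means both channels can traverse the SG lists entirely in
 * hardware, so no software splitting is needed.
 */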
static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
        struct dma_slave_caps tx = {0}, rx = {0};

        dma_get_slave_caps(dws->txchan, &tx);
        dma_get_slave_caps(dws->rxchan, &rx);

        if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
                dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
        else if (tx.max_sg_burst > 0)
                dws->dma_sg_burst = tx.max_sg_burst;
        else if (rx.max_sg_burst > 0)
                dws->dma_sg_burst = rx.max_sg_burst;
        else
                dws->dma_sg_burst = 0;
}
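
/*
 * Request the Rx and Tx DMA channels from the Intel Medfield DMA controller
 * PCI device and initialize the DMA-related DW SPI settings.
 */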
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
        struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
        struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
        struct pci_dev *dma_dev;
        dma_cap_mask_t mask;

        /*
         * Get the PCI device of the DMA controller. Currently it can only
         * be the Medfield DMA controller.
         */
        dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
        if (!dma_dev)
                return -ENODEV;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* 1. Init rx channel */
        rx->dma_dev = &dma_dev->dev;
        dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
        if (!dws->rxchan)
                goto err_exit;

        /* 2. Init tx channel */
        tx->dma_dev = &dma_dev->dev;
        dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
        if (!dws->txchan)
                goto free_rxchan;

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        dw_spi_dma_sg_burst_init(dws);

        return 0;

free_rxchan:
        dma_release_channel(dws->rxchan);
        dws->rxchan = NULL;
err_exit:
        return -EBUSY;
}
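
/*
 * Request the "rx" and "tx" DMA channels through the generic
 * dma_request_chan() lookup and initialize the DMA-related DW SPI settings.
 */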
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
        int ret;

        dws->rxchan = dma_request_chan(dev, "rx");
        if (IS_ERR(dws->rxchan)) {
                ret = PTR_ERR(dws->rxchan);
                dws->rxchan = NULL;
                goto err_exit;
        }

        dws->txchan = dma_request_chan(dev, "tx");
        if (IS_ERR(dws->txchan)) {
                ret = PTR_ERR(dws->txchan);
                dws->txchan = NULL;
                goto free_rxchan;
        }

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        dw_spi_dma_sg_burst_init(dws);

        return 0;

free_rxchan:
        dma_release_channel(dws->rxchan);
        dws->rxchan = NULL;
err_exit:
        return ret;
}
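
/* Terminate any in-flight DMA transactions and release both channels */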
static void dw_spi_dma_exit(struct dw_spi *dws)
{
        if (dws->txchan) {
                dmaengine_terminate_sync(dws->txchan);
                dma_release_channel(dws->txchan);
        }

        if (dws->rxchan) {
                dmaengine_terminate_sync(dws->rxchan);
                dma_release_channel(dws->rxchan);
        }
}
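
/*
 * SPI IRQ handler installed for the time of a DMA transfer: check the
 * controller status for Tx/Rx FIFO errors and wake up the routine waiting
 * for the DMA completion.
 */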
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
        dw_spi_check_status(dws, false);

        complete(&dws->dma_completion);

        return IRQ_HANDLED;
}
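
/* Use DMA only for transfers that don't fit entirely into the SPI FIFO */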
static bool dw_spi_can_dma(struct spi_controller *master,
                           struct spi_device *spi, struct spi_transfer *xfer)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);

        return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
        if (n_bytes == 1)
                return DMA_SLAVE_BUSWIDTH_1_BYTE;
        else if (n_bytes == 2)
                return DMA_SLAVE_BUSWIDTH_2_BYTES;

        return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
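
/*
 * Wait for the DMA completion with a timeout derived from the transfer
 * length and bus speed (plus some slack), so a stalled DMA engine can't
 * hang the SPI core indefinitely.
 */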
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
        unsigned long long ms;

        ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
        do_div(ms, speed);
        ms += ms + 200;

        if (ms > UINT_MAX)
                ms = UINT_MAX;

        ms = wait_for_completion_timeout(&dws->dma_completion,
                                         msecs_to_jiffies(ms));

        if (ms == 0) {
                dev_err(&dws->master->cur_msg->spi->dev,
                        "DMA transaction timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}
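
/*
 * dw_spi_dma_tx_busy() and dw_spi_dma_wait_tx_done() make sure the data
 * pushed by the Tx DMA channel has actually left the controller: the DMA
 * completion only indicates the last word was written into the Tx FIFO, so
 * poll the FIFO-empty flag, pacing the retries by the time the remaining
 * entries take on the wire.
 */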
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
        return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int retry = DW_SPI_WAIT_RETRIES;
        struct spi_delay delay;
        u32 nents;

        nents = dw_readl(dws, DW_SPI_TXFLR);
        delay.unit = SPI_DELAY_UNIT_SCK;
        delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

        while (dw_spi_dma_tx_busy(dws) && retry--)
                spi_delay_exec(&delay, xfer);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Tx hanged up\n");
                return -EIO;
        }

        return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
        if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}
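
/*
 * Configure the Tx DMA channel: memory-to-device transfers with the burst
 * length and destination bus width matching the current SPI transfer setup.
 */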
static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
        struct dma_slave_config txconf;

        memset(&txconf, 0, sizeof(txconf));
        txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = dws->txburst;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        txconf.device_fc = false;

        return dmaengine_slave_config(dws->txchan, &txconf);
}
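
/*
 * Prepare and submit a Tx descriptor for the given SG list, then mark the
 * Tx channel busy until the completion callback clears the flag again.
 */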
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *txdesc;
        dma_cookie_t cookie;
        int ret;

        txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
                                         DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
                return -ENOMEM;

        txdesc->callback = dw_spi_dma_tx_done;
        txdesc->callback_param = dws;

        cookie = dmaengine_submit(txdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->txchan);
                return ret;
        }

        set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

        return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
        return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
        int retry = DW_SPI_WAIT_RETRIES;
        struct spi_delay delay;
        unsigned long ns, us;
        u32 nents;

        /*
         * It's unlikely that the DMA engine is still doing the data fetching,
         * but if it is, let's give it some reasonable time. The timeout
         * calculation is based on the synchronous APB/SSI reference clock
         * rate, on the number of data entries left in the Rx FIFO, times the
         * number of clock periods normally needed for a single APB read/write
         * transaction without the PREADY signal utilized (which is true for
         * the DW APB SSI controller).
         */
        nents = dw_readl(dws, DW_SPI_RXFLR);
        ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
        if (ns <= NSEC_PER_USEC) {
                delay.unit = SPI_DELAY_UNIT_NSECS;
                delay.value = ns;
        } else {
                us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
                delay.unit = SPI_DELAY_UNIT_USECS;
                delay.value = clamp_val(us, 0, USHRT_MAX);
        }

        while (dw_spi_dma_rx_busy(dws) && retry--)
                spi_delay_exec(&delay, NULL);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Rx hanged up\n");
                return -EIO;
        }

        return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
        if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
        struct dma_slave_config rxconf;

        memset(&rxconf, 0, sizeof(rxconf));
        rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = dws->rxburst;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        rxconf.device_fc = false;

        return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *rxdesc;
        dma_cookie_t cookie;
        int ret;

        rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
                return -ENOMEM;

        rxdesc->callback = dw_spi_dma_rx_done;
        rxdesc->callback_param = dws;

        cookie = dmaengine_submit(rxdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->rxchan);
                return ret;
        }

        set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

        return 0;
}
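
/*
 * Configure the Rx/Tx DMA channels for the upcoming transfer, enable the
 * controller's DMA handshaking interface and unmask the FIFO error
 * interrupts which indicate a broken DMA-based transfer.
 */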
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
        u16 imr, dma_ctrl;
        int ret;

        if (!xfer->tx_buf)
                return -EINVAL;

        /* Setup DMA channels */
        ret = dw_spi_dma_config_tx(dws);
        if (ret)
                return ret;

        if (xfer->rx_buf) {
                ret = dw_spi_dma_config_rx(dws);
                if (ret)
                        return ret;
        }

        /* Set the DMA handshaking interface */
        dma_ctrl = DW_SPI_DMACR_TDMAE;
        if (xfer->rx_buf)
                dma_ctrl |= DW_SPI_DMACR_RDMAE;
        dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

        /* Set the interrupt mask */
        imr = DW_SPI_INT_TXOI;
        if (xfer->rx_buf)
                imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
        dw_spi_umask_intr(dws, imr);

        reinit_completion(&dws->dma_completion);

        dws->transfer_handler = dw_spi_dma_transfer_handler;

        return 0;
}
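
/*
 * Execute the transfer by handing the full Tx and Rx SG lists to the DMA
 * engine at once, then wait for the completion within a time limit derived
 * from the transfer length and speed.
 */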
static int dw_spi_dma_transfer_all(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int ret;

        /* Submit the DMA Tx transfer */
        ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
        if (ret)
                goto err_clear_dmac;

        /* Submit the DMA Rx transfer if required */
        if (xfer->rx_buf) {
                ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
                                           xfer->rx_sg.nents);
                if (ret)
                        goto err_clear_dmac;

                /* Rx must be started before Tx due to the SPI nature */
                dma_async_issue_pending(dws->rxchan);
        }

        dma_async_issue_pending(dws->txchan);

        ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}

/*
 * In case at least one of the requested DMA channels doesn't support the
 * hardware accelerated SG list entries traversal, the DMA driver will most
 * likely work that around by performing an IRQ-based SG list entries
 * resubmission. That might and will cause a problem if the DMA Tx channel is
 * recharged and re-executed before the Rx DMA channel. Due to the
 * non-deterministic IRQ-handler execution latency, the DMA Tx channel will
 * start pushing data to the SPI bus before the Rx DMA channel is even
 * reinitialized with the next inbound SG list entry. By doing so the DMA Tx
 * channel will implicitly start filling the DW APB SSI Rx FIFO up, which,
 * while the DMA Rx channel is being recharged and re-executed, will
 * eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have a different number of entries of different lengths (though
 * the total length should match), let's virtually split the SG lists into a
 * set of DMA transfers, each of which has a length equal to the minimum of
 * the ordered SG-entry lengths. An ASCII sketch of the implemented algorithm
 * follows:
 *       xfer->len
 *     |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note: for this workaround to solve the denoted problem the DMA engine
 * driver should properly initialize the max_sg_burst capability and set the
 * DMA device max segment size parameter with the maximum data block size the
 * DMA engine supports.
 */
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
        unsigned int tx_len = 0, rx_len = 0;
        unsigned int base, len;
        int ret;

        sg_init_table(&tx_tmp, 1);
        sg_init_table(&rx_tmp, 1);

        for (base = 0, len = 0; base < xfer->len; base += len) {
                /* Fetch next Tx DMA data chunk */
                if (!tx_len) {
                        tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
                        sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
                        tx_len = sg_dma_len(tx_sg);
                }

                /* Fetch next Rx DMA data chunk */
                if (!rx_len) {
                        rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
                        sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
                        rx_len = sg_dma_len(rx_sg);
                }

                len = min(tx_len, rx_len);

                sg_dma_len(&tx_tmp) = len;
                sg_dma_len(&rx_tmp) = len;

                /* Submit DMA Tx transfer */
                ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
                if (ret)
                        break;

                /* Submit DMA Rx transfer */
                ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
                if (ret)
                        break;

                /* Rx must be started before Tx due to the SPI nature */
                dma_async_issue_pending(dws->rxchan);
                dma_async_issue_pending(dws->txchan);

                /*
                 * Here we only need to wait for the DMA transfer to be
                 * finished since the SPI controller is kept enabled during
                 * the procedure this loop implements, so there is no risk of
                 * losing data left in the Tx/Rx FIFOs.
                 */
                ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
                if (ret)
                        break;

                reinit_completion(&dws->dma_completion);

                sg_dma_address(&tx_tmp) += len;
                sg_dma_address(&rx_tmp) += len;
                tx_len -= len;
                rx_len -= len;
        }

        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
        unsigned int nents;
        int ret;

        nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

        /*
         * Execute the normal DMA-based transfer (which submits the Rx and Tx
         * SG lists directly to the DMA engine at once) if either the full
         * hardware accelerated SG list traversal is supported by both
         * channels, or a Tx-only SPI transfer is requested, or the DMA engine
         * is capable of handling both SG lists on a hardware accelerated
         * basis.
         */
        if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
                ret = dw_spi_dma_transfer_all(dws, xfer);
        else
                ret = dw_spi_dma_transfer_one(dws, xfer);
        if (ret)
                return ret;

        if (dws->master->cur_msg->status == -EINPROGRESS) {
                ret = dw_spi_dma_wait_tx_done(dws, xfer);
                if (ret)
                        return ret;
        }

        if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
                ret = dw_spi_dma_wait_rx_done(dws);

        return ret;
}
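
/*
 * Abort handler: terminate whichever DMA channels are still marked busy and
 * clear the busy flags so a later transfer starts from a clean state.
 */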
static void dw_spi_dma_stop(struct dw_spi *dws)
{
        if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->txchan);
                clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
        }

        if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->rxchan);
                clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
        }
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
        .dma_init       = dw_spi_dma_init_mfld,
        .dma_exit       = dw_spi_dma_exit,
        .dma_setup      = dw_spi_dma_setup,
        .can_dma        = dw_spi_can_dma,
        .dma_transfer   = dw_spi_dma_transfer,
        .dma_stop       = dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
        .dma_init       = dw_spi_dma_init_generic,
        .dma_exit       = dw_spi_dma_exit,
        .dma_setup      = dw_spi_dma_setup,
        .can_dma        = dw_spi_can_dma,
        .dma_transfer   = dw_spi_dma_transfer,
        .dma_stop       = dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);