2019-05-29 17:18:02 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2010-12-24 08:59:11 +03:00
/*
2020-05-29 16:11:59 +03:00
* Special handling for DW DMA core
2010-12-24 08:59:11 +03:00
*
2014-09-12 16:12:01 +04:00
 * Copyright (c) 2009, 2014 Intel Corporation.
2010-12-24 08:59:11 +03:00
*/
2020-05-29 16:11:52 +03:00
# include <linux/completion.h>
2020-05-06 18:30:22 +03:00
# include <linux/dma-mapping.h>
# include <linux/dmaengine.h>
2020-05-06 18:30:21 +03:00
# include <linux/irqreturn.h>
2020-05-29 16:11:52 +03:00
# include <linux/jiffies.h>
2010-12-24 08:59:11 +03:00
# include <linux/pci.h>
2015-03-09 17:48:50 +03:00
# include <linux/platform_data/dma-dw.h>
2020-05-29 16:11:59 +03:00
# include <linux/spi/spi.h>
# include <linux/types.h>
# include "spi-dw.h"
2010-12-24 08:59:11 +03:00
2020-05-29 16:11:53 +03:00
# define WAIT_RETRIES 5
2014-10-28 19:25:02 +03:00
# define RX_BUSY 0
2020-05-29 16:11:55 +03:00
# define RX_BURST_LEVEL 16
2014-10-28 19:25:02 +03:00
# define TX_BUSY 1
2020-05-29 16:11:55 +03:00
# define TX_BURST_LEVEL 16
2014-10-28 19:25:02 +03:00
2020-05-29 16:12:02 +03:00
static bool dw_spi_dma_chan_filter ( struct dma_chan * chan , void * param )
2010-12-24 08:59:11 +03:00
{
2015-03-09 17:48:50 +03:00
struct dw_dma_slave * s = param ;
if ( s - > dma_dev ! = chan - > device - > dev )
return false ;
2010-12-24 08:59:11 +03:00
2015-03-09 17:48:50 +03:00
chan - > private = s ;
return true ;
2010-12-24 08:59:11 +03:00
}
2020-05-29 16:12:02 +03:00
static void dw_spi_dma_maxburst_init ( struct dw_spi * dws )
2020-05-29 16:11:56 +03:00
{
struct dma_slave_caps caps ;
u32 max_burst , def_burst ;
int ret ;
def_burst = dws - > fifo_len / 2 ;
ret = dma_get_slave_caps ( dws - > rxchan , & caps ) ;
if ( ! ret & & caps . max_burst )
max_burst = caps . max_burst ;
else
max_burst = RX_BURST_LEVEL ;
dws - > rxburst = min ( max_burst , def_burst ) ;
2020-09-20 14:23:12 +03:00
dw_writel ( dws , DW_SPI_DMARDLR , dws - > rxburst - 1 ) ;
2020-05-29 16:11:56 +03:00
ret = dma_get_slave_caps ( dws - > txchan , & caps ) ;
if ( ! ret & & caps . max_burst )
max_burst = caps . max_burst ;
else
max_burst = TX_BURST_LEVEL ;
2020-09-20 14:23:12 +03:00
/*
* Having a Rx DMA channel serviced with higher priority than a Tx DMA
* channel might not be enough to provide a well balanced DMA - based
* SPI transfer interface . There might still be moments when the Tx DMA
* channel is occasionally handled faster than the Rx DMA channel .
* That in its turn will eventually cause the SPI Rx FIFO overflow if
* SPI bus speed is high enough to fill the SPI Rx FIFO in before it ' s
* cleared by the Rx DMA channel . In order to fix the problem the Tx
* DMA activity is intentionally slowed down by limiting the SPI Tx
* FIFO depth with a value twice bigger than the Tx burst length .
*/
2020-05-29 16:11:56 +03:00
dws - > txburst = min ( max_burst , def_burst ) ;
2020-09-20 14:23:12 +03:00
dw_writel ( dws , DW_SPI_DMATDLR , dws - > txburst ) ;
2020-05-29 16:11:56 +03:00
}
2020-05-29 16:12:02 +03:00
static int dw_spi_dma_init_mfld ( struct device * dev , struct dw_spi * dws )
2010-12-24 08:59:11 +03:00
{
2020-05-29 21:31:49 +03:00
struct dw_dma_slave dma_tx = { . dst_id = 1 } , * tx = & dma_tx ;
struct dw_dma_slave dma_rx = { . src_id = 0 } , * rx = & dma_rx ;
2014-09-12 16:12:00 +04:00
struct pci_dev * dma_dev ;
2010-12-24 08:59:11 +03:00
dma_cap_mask_t mask ;
/*
* Get pci device for DMA controller , currently it could only
2014-09-12 16:11:59 +04:00
* be the DMA controller of Medfield
2010-12-24 08:59:11 +03:00
*/
2014-09-12 16:12:00 +04:00
dma_dev = pci_get_device ( PCI_VENDOR_ID_INTEL , 0x0827 , NULL ) ;
if ( ! dma_dev )
return - ENODEV ;
2010-12-24 08:59:11 +03:00
dma_cap_zero ( mask ) ;
dma_cap_set ( DMA_SLAVE , mask ) ;
/* 1. Init rx channel */
2020-05-29 21:31:49 +03:00
rx - > dma_dev = & dma_dev - > dev ;
dws - > rxchan = dma_request_channel ( mask , dw_spi_dma_chan_filter , rx ) ;
2010-12-24 08:59:11 +03:00
if ( ! dws - > rxchan )
goto err_exit ;
/* 2. Init tx channel */
2020-05-29 21:31:49 +03:00
tx - > dma_dev = & dma_dev - > dev ;
dws - > txchan = dma_request_channel ( mask , dw_spi_dma_chan_filter , tx ) ;
2010-12-24 08:59:11 +03:00
if ( ! dws - > txchan )
goto free_rxchan ;
2020-05-07 14:54:49 +03:00
dws - > master - > dma_rx = dws - > rxchan ;
2015-03-09 17:48:49 +03:00
dws - > master - > dma_tx = dws - > txchan ;
2010-12-24 08:59:11 +03:00
2020-05-29 16:11:52 +03:00
init_completion ( & dws - > dma_completion ) ;
2020-05-29 16:12:02 +03:00
dw_spi_dma_maxburst_init ( dws ) ;
2020-05-29 16:11:56 +03:00
2010-12-24 08:59:11 +03:00
return 0 ;
free_rxchan :
dma_release_channel ( dws - > rxchan ) ;
2020-05-07 14:54:49 +03:00
dws - > rxchan = NULL ;
2010-12-24 08:59:11 +03:00
err_exit :
2014-09-12 16:12:00 +04:00
return - EBUSY ;
2010-12-24 08:59:11 +03:00
}
2020-05-29 16:12:02 +03:00
static int dw_spi_dma_init_generic ( struct device * dev , struct dw_spi * dws )
2020-05-06 18:30:25 +03:00
{
dws - > rxchan = dma_request_slave_channel ( dev , " rx " ) ;
if ( ! dws - > rxchan )
return - ENODEV ;
dws - > txchan = dma_request_slave_channel ( dev , " tx " ) ;
if ( ! dws - > txchan ) {
dma_release_channel ( dws - > rxchan ) ;
2020-05-07 14:54:49 +03:00
dws - > rxchan = NULL ;
2020-05-06 18:30:25 +03:00
return - ENODEV ;
}
2020-05-07 14:54:49 +03:00
dws - > master - > dma_rx = dws - > rxchan ;
2020-05-06 18:30:25 +03:00
dws - > master - > dma_tx = dws - > txchan ;
2020-05-29 16:11:52 +03:00
init_completion ( & dws - > dma_completion ) ;
2020-05-29 16:12:02 +03:00
dw_spi_dma_maxburst_init ( dws ) ;
2020-05-29 16:11:56 +03:00
2020-05-06 18:30:25 +03:00
return 0 ;
}
2020-05-29 16:12:02 +03:00
static void dw_spi_dma_exit ( struct dw_spi * dws )
2010-12-24 08:59:11 +03:00
{
2020-05-07 14:54:49 +03:00
if ( dws - > txchan ) {
dmaengine_terminate_sync ( dws - > txchan ) ;
dma_release_channel ( dws - > txchan ) ;
}
2014-09-18 21:08:53 +04:00
2020-05-07 14:54:49 +03:00
if ( dws - > rxchan ) {
dmaengine_terminate_sync ( dws - > rxchan ) ;
dma_release_channel ( dws - > rxchan ) ;
}
2010-12-24 08:59:11 +03:00
}
2020-05-29 16:12:02 +03:00
static irqreturn_t dw_spi_dma_transfer_handler ( struct dw_spi * dws )
2015-03-09 17:48:47 +03:00
{
2015-03-12 22:19:31 +03:00
u16 irq_status = dw_readl ( dws , DW_SPI_ISR ) ;
2015-03-09 17:48:47 +03:00
if ( ! irq_status )
return IRQ_NONE ;
2015-03-12 22:19:31 +03:00
dw_readl ( dws , DW_SPI_ICR ) ;
2015-03-09 17:48:47 +03:00
spi_reset_chip ( dws ) ;
dev_err ( & dws - > master - > dev , " %s: FIFO overrun/underrun \n " , __func__ ) ;
dws - > master - > cur_msg - > status = - EIO ;
2020-05-29 16:11:52 +03:00
complete ( & dws - > dma_completion ) ;
2015-03-09 17:48:47 +03:00
return IRQ_HANDLED ;
}
2020-05-29 16:12:02 +03:00
static bool dw_spi_can_dma ( struct spi_controller * master ,
struct spi_device * spi , struct spi_transfer * xfer )
2015-03-09 17:48:49 +03:00
{
2018-02-01 18:17:29 +03:00
struct dw_spi * dws = spi_controller_get_devdata ( master ) ;
2015-03-09 17:48:49 +03:00
return xfer - > len > dws - > fifo_len ;
}
2020-05-29 16:12:02 +03:00
static enum dma_slave_buswidth dw_spi_dma_convert_width ( u8 n_bytes )
{
2020-05-22 03:07:54 +03:00
if ( n_bytes = = 1 )
2015-03-09 17:48:45 +03:00
return DMA_SLAVE_BUSWIDTH_1_BYTE ;
2020-05-22 03:07:54 +03:00
else if ( n_bytes = = 2 )
2015-03-09 17:48:45 +03:00
return DMA_SLAVE_BUSWIDTH_2_BYTES ;
return DMA_SLAVE_BUSWIDTH_UNDEFINED ;
}
2020-09-20 14:23:21 +03:00
static int dw_spi_dma_wait ( struct dw_spi * dws , unsigned int len , u32 speed )
2020-05-29 16:11:52 +03:00
{
unsigned long long ms ;
2020-09-20 14:23:21 +03:00
ms = len * MSEC_PER_SEC * BITS_PER_BYTE ;
do_div ( ms , speed ) ;
2020-05-29 16:11:52 +03:00
ms + = ms + 200 ;
if ( ms > UINT_MAX )
ms = UINT_MAX ;
ms = wait_for_completion_timeout ( & dws - > dma_completion ,
msecs_to_jiffies ( ms ) ) ;
if ( ms = = 0 ) {
dev_err ( & dws - > master - > cur_msg - > spi - > dev ,
" DMA transaction timed out \n " ) ;
return - ETIMEDOUT ;
}
return 0 ;
}
2020-05-29 16:11:53 +03:00
static inline bool dw_spi_dma_tx_busy ( struct dw_spi * dws )
{
return ! ( dw_readl ( dws , DW_SPI_SR ) & SR_TF_EMPT ) ;
}
static int dw_spi_dma_wait_tx_done ( struct dw_spi * dws ,
struct spi_transfer * xfer )
{
int retry = WAIT_RETRIES ;
struct spi_delay delay ;
u32 nents ;
nents = dw_readl ( dws , DW_SPI_TXFLR ) ;
delay . unit = SPI_DELAY_UNIT_SCK ;
delay . value = nents * dws - > n_bytes * BITS_PER_BYTE ;
while ( dw_spi_dma_tx_busy ( dws ) & & retry - - )
spi_delay_exec ( & delay , xfer ) ;
if ( retry < 0 ) {
dev_err ( & dws - > master - > dev , " Tx hanged up \n " ) ;
return - EIO ;
}
return 0 ;
}
2010-12-24 08:59:11 +03:00
/*
2014-10-28 19:25:02 +03:00
* dws - > dma_chan_busy is set before the dma transfer starts , callback for tx
* channel will clear a corresponding bit .
2010-12-24 08:59:11 +03:00
*/
2014-10-28 19:25:02 +03:00
static void dw_spi_dma_tx_done ( void * arg )
2010-12-24 08:59:11 +03:00
{
struct dw_spi * dws = arg ;
2015-03-06 15:42:01 +03:00
clear_bit ( TX_BUSY , & dws - > dma_chan_busy ) ;
if ( test_bit ( RX_BUSY , & dws - > dma_chan_busy ) )
2010-12-24 08:59:11 +03:00
return ;
2020-05-15 13:47:42 +03:00
2020-05-29 16:11:52 +03:00
complete ( & dws - > dma_completion ) ;
2010-12-24 08:59:11 +03:00
}
2020-09-20 14:23:14 +03:00
static int dw_spi_dma_config_tx ( struct dw_spi * dws )
2010-12-24 08:59:11 +03:00
{
2014-10-28 19:25:01 +03:00
struct dma_slave_config txconf ;
2010-12-24 08:59:11 +03:00
2020-05-06 18:30:18 +03:00
memset ( & txconf , 0 , sizeof ( txconf ) ) ;
2011-10-14 09:17:38 +04:00
txconf . direction = DMA_MEM_TO_DEV ;
2010-12-24 08:59:11 +03:00
txconf . dst_addr = dws - > dma_addr ;
2020-05-29 16:11:56 +03:00
txconf . dst_maxburst = dws - > txburst ;
2010-12-24 08:59:11 +03:00
txconf . src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES ;
2020-05-29 16:12:02 +03:00
txconf . dst_addr_width = dw_spi_dma_convert_width ( dws - > n_bytes ) ;
2012-02-01 14:42:19 +04:00
txconf . device_fc = false ;
2010-12-24 08:59:11 +03:00
2020-09-20 14:23:14 +03:00
return dmaengine_slave_config ( dws - > txchan , & txconf ) ;
}
2020-09-20 14:23:21 +03:00
static int dw_spi_dma_submit_tx ( struct dw_spi * dws , struct scatterlist * sgl ,
unsigned int nents )
2020-09-20 14:23:14 +03:00
{
struct dma_async_tx_descriptor * txdesc ;
2020-09-20 14:23:17 +03:00
dma_cookie_t cookie ;
int ret ;
2010-12-24 08:59:11 +03:00
2020-09-20 14:23:21 +03:00
txdesc = dmaengine_prep_slave_sg ( dws - > txchan , sgl , nents ,
DMA_MEM_TO_DEV ,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK ) ;
2015-03-02 21:15:58 +03:00
if ( ! txdesc )
2020-09-20 14:23:18 +03:00
return - ENOMEM ;
2015-03-02 21:15:58 +03:00
2014-10-28 19:25:02 +03:00
txdesc - > callback = dw_spi_dma_tx_done ;
2010-12-24 08:59:11 +03:00
txdesc - > callback_param = dws ;
2020-09-20 14:23:17 +03:00
cookie = dmaengine_submit ( txdesc ) ;
ret = dma_submit_error ( cookie ) ;
if ( ret ) {
dmaengine_terminate_sync ( dws - > txchan ) ;
2020-09-20 14:23:18 +03:00
return ret ;
2020-09-20 14:23:17 +03:00
}
2020-09-20 14:23:16 +03:00
set_bit ( TX_BUSY , & dws - > dma_chan_busy ) ;
2020-09-20 14:23:18 +03:00
return 0 ;
2014-10-28 19:25:01 +03:00
}
2020-05-29 16:11:54 +03:00
static inline bool dw_spi_dma_rx_busy ( struct dw_spi * dws )
{
return ! ! ( dw_readl ( dws , DW_SPI_SR ) & SR_RF_NOT_EMPT ) ;
}
static int dw_spi_dma_wait_rx_done ( struct dw_spi * dws )
{
int retry = WAIT_RETRIES ;
struct spi_delay delay ;
unsigned long ns , us ;
u32 nents ;
/*
* It ' s unlikely that DMA engine is still doing the data fetching , but
* if it ' s let ' s give it some reasonable time . The timeout calculation
* is based on the synchronous APB / SSI reference clock rate , on a
* number of data entries left in the Rx FIFO , times a number of clock
* periods normally needed for a single APB read / write transaction
* without PREADY signal utilized ( which is true for the DW APB SSI
* controller ) .
*/
nents = dw_readl ( dws , DW_SPI_RXFLR ) ;
ns = 4U * NSEC_PER_SEC / dws - > max_freq * nents ;
if ( ns < = NSEC_PER_USEC ) {
delay . unit = SPI_DELAY_UNIT_NSECS ;
delay . value = ns ;
} else {
us = DIV_ROUND_UP ( ns , NSEC_PER_USEC ) ;
delay . unit = SPI_DELAY_UNIT_USECS ;
delay . value = clamp_val ( us , 0 , USHRT_MAX ) ;
}
while ( dw_spi_dma_rx_busy ( dws ) & & retry - - )
spi_delay_exec ( & delay , NULL ) ;
if ( retry < 0 ) {
dev_err ( & dws - > master - > dev , " Rx hanged up \n " ) ;
return - EIO ;
}
return 0 ;
}
2014-10-28 19:25:02 +03:00
/*
* dws - > dma_chan_busy is set before the dma transfer starts , callback for rx
* channel will clear a corresponding bit .
*/
static void dw_spi_dma_rx_done ( void * arg )
{
struct dw_spi * dws = arg ;
2015-03-06 15:42:01 +03:00
clear_bit ( RX_BUSY , & dws - > dma_chan_busy ) ;
if ( test_bit ( TX_BUSY , & dws - > dma_chan_busy ) )
2014-10-28 19:25:02 +03:00
return ;
2020-05-15 13:47:42 +03:00
2020-05-29 16:11:52 +03:00
complete ( & dws - > dma_completion ) ;
2014-10-28 19:25:02 +03:00
}
2020-09-20 14:23:14 +03:00
static int dw_spi_dma_config_rx ( struct dw_spi * dws )
2014-10-28 19:25:01 +03:00
{
struct dma_slave_config rxconf ;
2014-10-28 19:25:02 +03:00
2020-05-06 18:30:18 +03:00
memset ( & rxconf , 0 , sizeof ( rxconf ) ) ;
2011-10-14 09:17:38 +04:00
rxconf . direction = DMA_DEV_TO_MEM ;
2010-12-24 08:59:11 +03:00
rxconf . src_addr = dws - > dma_addr ;
2020-05-29 16:11:56 +03:00
rxconf . src_maxburst = dws - > rxburst ;
2010-12-24 08:59:11 +03:00
rxconf . dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES ;
2020-05-29 16:12:02 +03:00
rxconf . src_addr_width = dw_spi_dma_convert_width ( dws - > n_bytes ) ;
2012-02-01 14:42:19 +04:00
rxconf . device_fc = false ;
2010-12-24 08:59:11 +03:00
2020-09-20 14:23:14 +03:00
return dmaengine_slave_config ( dws - > rxchan , & rxconf ) ;
}
2020-09-20 14:23:21 +03:00
static int dw_spi_dma_submit_rx ( struct dw_spi * dws , struct scatterlist * sgl ,
unsigned int nents )
2020-09-20 14:23:14 +03:00
{
struct dma_async_tx_descriptor * rxdesc ;
2020-09-20 14:23:17 +03:00
dma_cookie_t cookie ;
int ret ;
2020-09-20 14:23:14 +03:00
2020-09-20 14:23:21 +03:00
rxdesc = dmaengine_prep_slave_sg ( dws - > rxchan , sgl , nents ,
DMA_DEV_TO_MEM ,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK ) ;
2015-03-02 21:15:58 +03:00
if ( ! rxdesc )
2020-09-20 14:23:18 +03:00
return - ENOMEM ;
2015-03-02 21:15:58 +03:00
2014-10-28 19:25:02 +03:00
rxdesc - > callback = dw_spi_dma_rx_done ;
2010-12-24 08:59:11 +03:00
rxdesc - > callback_param = dws ;
2020-09-20 14:23:17 +03:00
cookie = dmaengine_submit ( rxdesc ) ;
ret = dma_submit_error ( cookie ) ;
if ( ret ) {
dmaengine_terminate_sync ( dws - > rxchan ) ;
2020-09-20 14:23:18 +03:00
return ret ;
2020-09-20 14:23:17 +03:00
}
2020-09-20 14:23:16 +03:00
set_bit ( RX_BUSY , & dws - > dma_chan_busy ) ;
2020-09-20 14:23:18 +03:00
return 0 ;
2014-10-28 19:25:01 +03:00
}
2020-05-29 16:12:02 +03:00
static int dw_spi_dma_setup ( struct dw_spi * dws , struct spi_transfer * xfer )
2014-10-28 19:25:01 +03:00
{
2020-09-20 14:23:13 +03:00
u16 imr , dma_ctrl ;
2020-09-20 14:23:14 +03:00
int ret ;
2014-10-28 19:25:01 +03:00
2020-09-20 14:23:13 +03:00
if ( ! xfer - > tx_buf )
return - EINVAL ;
2020-09-20 14:23:14 +03:00
/* Setup DMA channels */
ret = dw_spi_dma_config_tx ( dws ) ;
if ( ret )
return ret ;
if ( xfer - > rx_buf ) {
ret = dw_spi_dma_config_rx ( dws ) ;
if ( ret )
return ret ;
}
2020-09-20 14:23:13 +03:00
/* Set the DMA handshaking interface */
dma_ctrl = SPI_DMA_TDMAE ;
2020-05-29 21:31:50 +03:00
if ( xfer - > rx_buf )
2014-10-28 19:25:01 +03:00
dma_ctrl | = SPI_DMA_RDMAE ;
2015-03-12 22:19:31 +03:00
dw_writel ( dws , DW_SPI_DMACR , dma_ctrl ) ;
2014-10-28 19:25:01 +03:00
2015-03-09 17:48:47 +03:00
/* Set the interrupt mask */
2020-09-20 14:23:13 +03:00
imr = SPI_INT_TXOI ;
2020-05-29 21:31:50 +03:00
if ( xfer - > rx_buf )
imr | = SPI_INT_RXUI | SPI_INT_RXOI ;
2020-05-22 03:07:51 +03:00
spi_umask_intr ( dws , imr ) ;
2015-03-09 17:48:47 +03:00
2020-05-29 16:11:52 +03:00
reinit_completion ( & dws - > dma_completion ) ;
2020-05-29 16:12:02 +03:00
dws - > transfer_handler = dw_spi_dma_transfer_handler ;
2015-03-09 17:48:47 +03:00
2015-03-09 17:48:46 +03:00
return 0 ;
2014-10-28 19:25:01 +03:00
}
2020-09-20 14:23:19 +03:00
static int dw_spi_dma_transfer_all ( struct dw_spi * dws ,
struct spi_transfer * xfer )
2014-10-28 19:25:01 +03:00
{
2020-05-29 16:11:52 +03:00
int ret ;
2014-10-28 19:25:01 +03:00
2020-09-20 14:23:16 +03:00
/* Submit the DMA Tx transfer */
2020-09-20 14:23:21 +03:00
ret = dw_spi_dma_submit_tx ( dws , xfer - > tx_sg . sgl , xfer - > tx_sg . nents ) ;
2020-09-20 14:23:18 +03:00
if ( ret )
2020-09-20 14:23:20 +03:00
goto err_clear_dmac ;
2014-10-28 19:25:01 +03:00
2020-09-20 14:23:16 +03:00
/* Submit the DMA Rx transfer if required */
2020-09-20 14:23:15 +03:00
if ( xfer - > rx_buf ) {
2020-09-20 14:23:21 +03:00
ret = dw_spi_dma_submit_rx ( dws , xfer - > rx_sg . sgl ,
xfer - > rx_sg . nents ) ;
2020-09-20 14:23:18 +03:00
if ( ret )
2020-09-20 14:23:20 +03:00
goto err_clear_dmac ;
2014-10-28 19:25:01 +03:00
2020-09-20 14:23:15 +03:00
/* rx must be started before tx due to spi instinct */
2014-10-28 19:25:02 +03:00
dma_async_issue_pending ( dws - > rxchan ) ;
}
2020-09-20 14:23:13 +03:00
dma_async_issue_pending ( dws - > txchan ) ;
2014-10-02 17:31:09 +04:00
2020-09-20 14:23:21 +03:00
ret = dw_spi_dma_wait ( dws , xfer - > len , xfer - > effective_speed_hz ) ;
2020-09-20 14:23:20 +03:00
err_clear_dmac :
dw_writel ( dws , DW_SPI_DMACR , 0 ) ;
return ret ;
2020-09-20 14:23:19 +03:00
}
static int dw_spi_dma_transfer ( struct dw_spi * dws , struct spi_transfer * xfer )
{
int ret ;
ret = dw_spi_dma_transfer_all ( dws , xfer ) ;
2020-05-29 16:11:52 +03:00
if ( ret )
return ret ;
2020-09-20 14:23:13 +03:00
if ( dws - > master - > cur_msg - > status = = - EINPROGRESS ) {
2020-05-29 16:11:53 +03:00
ret = dw_spi_dma_wait_tx_done ( dws , xfer ) ;
if ( ret )
return ret ;
}
2020-09-20 14:23:15 +03:00
if ( xfer - > rx_buf & & dws - > master - > cur_msg - > status = = - EINPROGRESS )
2020-05-29 16:11:54 +03:00
ret = dw_spi_dma_wait_rx_done ( dws ) ;
return ret ;
2010-12-24 08:59:11 +03:00
}
2020-05-29 16:12:02 +03:00
static void dw_spi_dma_stop ( struct dw_spi * dws )
2015-03-09 17:48:48 +03:00
{
if ( test_bit ( TX_BUSY , & dws - > dma_chan_busy ) ) {
2017-01-03 16:48:20 +03:00
dmaengine_terminate_sync ( dws - > txchan ) ;
2015-03-09 17:48:48 +03:00
clear_bit ( TX_BUSY , & dws - > dma_chan_busy ) ;
}
if ( test_bit ( RX_BUSY , & dws - > dma_chan_busy ) ) {
2017-01-03 16:48:20 +03:00
dmaengine_terminate_sync ( dws - > rxchan ) ;
2015-03-09 17:48:48 +03:00
clear_bit ( RX_BUSY , & dws - > dma_chan_busy ) ;
}
}
2020-05-29 16:12:02 +03:00
static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
. dma_init = dw_spi_dma_init_mfld ,
. dma_exit = dw_spi_dma_exit ,
. dma_setup = dw_spi_dma_setup ,
. can_dma = dw_spi_can_dma ,
. dma_transfer = dw_spi_dma_transfer ,
. dma_stop = dw_spi_dma_stop ,
2010-12-24 08:59:11 +03:00
} ;
2020-05-06 18:30:23 +03:00
2020-05-29 16:12:02 +03:00
void dw_spi_dma_setup_mfld ( struct dw_spi * dws )
2020-05-06 18:30:23 +03:00
{
2020-05-29 16:12:02 +03:00
dws - > dma_ops = & dw_spi_dma_mfld_ops ;
2020-05-06 18:30:23 +03:00
}
2020-05-29 16:12:02 +03:00
EXPORT_SYMBOL_GPL ( dw_spi_dma_setup_mfld ) ;
static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
. dma_init = dw_spi_dma_init_generic ,
. dma_exit = dw_spi_dma_exit ,
. dma_setup = dw_spi_dma_setup ,
. can_dma = dw_spi_can_dma ,
. dma_transfer = dw_spi_dma_transfer ,
. dma_stop = dw_spi_dma_stop ,
2020-05-06 18:30:25 +03:00
} ;
2020-05-29 16:12:02 +03:00
void dw_spi_dma_setup_generic ( struct dw_spi * dws )
2020-05-06 18:30:25 +03:00
{
2020-05-29 16:12:02 +03:00
dws - > dma_ops = & dw_spi_dma_generic_ops ;
2020-05-06 18:30:25 +03:00
}
2020-05-29 16:12:02 +03:00
EXPORT_SYMBOL_GPL ( dw_spi_dma_setup_generic ) ;