// SPDX-License-Identifier: GPL-2.0-only
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, 2021 Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>

#include "spi-pxa2xx.h"

static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->controller->cur_msg;

	/*
	 * It is possible that one CPU is handling ROR interrupt and other
	 * just gets DMA completion. Calling pump_transfers() twice for the
	 * same transfer leads to problems thus we prevent concurrent calls
	 * by using dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error)
			error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;

		/* Clear status & disable interrupts */
		clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		if (error) {
			/* In case we got an error we disable the SSP now */
			pxa_ssp_disable(drv_data->ssp);
			msg->status = -EIO;
		}

		spi_finalize_current_transfer(drv_data->controller);
	}
}

static void pxa2xx_spi_dma_callback(void *data)
{
	pxa2xx_spi_dma_transfer_complete(data, false);
}

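/*
 * Configure the DMA channel for the given direction and wrap the
 * transfer's scatter-gather list in a slave DMA descriptor. Returns
 * NULL if the channel cannot be configured or no descriptor is
 * available.
 */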
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir,
			   struct spi_transfer *xfer)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->controller->cur_msg->spi);
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int ret;

	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;

		sgt = &xfer->tx_sg;
		chan = drv_data->controller->dma_tx;
	} else {
		cfg.src_addr = drv_data->ssp->phys_base + SSDR;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;

		sgt = &xfer->rx_sg;
		chan = drv_data->controller->dma_rx;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

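/*
 * Interrupt handling helper: on a receive FIFO overrun (ROR) abort both
 * DMA channels and complete the current transfer with an error.
 */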
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 status;

	status = read_SSSR_bits(drv_data, drv_data->mask_sr);
	if (status & SSSR_ROR) {
		dev_err(drv_data->ssp->dev, "FIFO overrun\n");

		dmaengine_terminate_async(drv_data->controller->dma_rx);
		dmaengine_terminate_async(drv_data->controller->dma_tx);

		pxa2xx_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

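/*
 * Prepare and submit the TX and RX descriptors for a transfer. Only the
 * RX descriptor carries the completion callback, since the transfer is
 * done once the last byte has been received.
 */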
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
			   struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int err;

	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
	if (!tx_desc) {
		dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
		err = -EBUSY;
		goto err_tx;
	}

	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
	if (!rx_desc) {
		dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
		err = -EBUSY;
		goto err_rx;
	}

	/* We are ready when RX completes */
	rx_desc->callback = pxa2xx_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);

	return 0;

err_rx:
	dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
	return err;
}

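/* Kick both channels and mark the DMA transfer as running. */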
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	dma_async_issue_pending(drv_data->controller->dma_rx);
	dma_async_issue_pending(drv_data->controller->dma_tx);

	atomic_set(&drv_data->dma_running, 1);
}

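/* Mark the DMA transfer as stopped and terminate both channels. */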
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
	atomic_set(&drv_data->dma_running, 0);
	dmaengine_terminate_sync(drv_data->controller->dma_rx);
	dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

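/*
 * Request the TX and RX DMA channels, using either the platform data
 * filter/parameters or the device's named "tx"/"rx" channels.
 */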
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
	struct spi_controller *controller = drv_data->controller;
	struct device *dev = drv_data->ssp->dev;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	controller->dma_tx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!controller->dma_tx)
		return -ENODEV;

	controller->dma_rx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!controller->dma_rx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}

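/* Terminate any ongoing DMA and release both channels. */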
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct spi_controller *controller = drv_data->controller;

	if (controller->dma_rx) {
		dmaengine_terminate_sync(controller->dma_rx);
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}
	if (controller->dma_tx) {
		dmaengine_terminate_sync(controller->dma_tx);
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

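/*
 * Pick the DMA burst size and FIFO thresholds for the device. A per-chip
 * value from chip_info, if provided, overrides the controller default.
 */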
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info = spi->controller_data;
	struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
	u32 dma_burst_size = drv_data->controller_info->dma_burst_size;

	/*
	 * If the DMA burst size is given in chip_info we use that,
	 * otherwise we use the default. Also we use the default FIFO
	 * thresholds for now.
	 */
	*burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
	*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
		   | SSCR1_TxTresh(TX_THRESH_DFLT);

	return 0;
}