/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>

#define RX_BUSY		0
#define TX_BUSY		1

struct mid_dma {
	struct intel_mid_dma_slave	dmas_tx;
	struct intel_mid_dma_slave	dmas_rx;
};
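
/*
 * dma_request_channel() filter: accept only channels that belong to the DMA
 * device looked up in mid_spi_dma_init() below.
 */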
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_spi *dws = param;

	return dws->dma_dev == chan->device->dev;
}
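
/*
 * Find the Medfield DMA controller on the PCI bus and request one rx and
 * one tx slave channel from it, using the hardware handshake interface.
 */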
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct mid_dma *dw_dma = dws->dma_priv;
	struct pci_dev *dma_dev;
	struct intel_mid_dma_slave *rxs, *txs;
	dma_cap_mask_t mask;

	/*
	 * Get pci device for DMA controller, currently it could only
	 * be the DMA controller of Medfield
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dws->dma_dev = &dma_dev->dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->rxchan)
		goto err_exit;
	rxs = &dw_dma->dmas_rx;
	rxs->hs_mode = LNW_DMA_HW_HS;
	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
	dws->rxchan->private = rxs;

	/* 2. Init tx channel */
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->txchan)
		goto free_rxchan;
	txs = &dw_dma->dmas_tx;
	txs->hs_mode = LNW_DMA_HW_HS;
	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
	dws->txchan->private = txs;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}
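
/* Stop any in-flight descriptors and hand both channels back to dmaengine */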
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_all(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_all(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts, and the callback
 * for the tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
		return;
	dw_spi_xfer_done(dws);
}
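
/*
 * Configure the tx channel for a memory-to-peripheral transfer and build a
 * single-entry scatterlist descriptor for it. Returns NULL when the current
 * transfer has no tx buffer mapped for DMA.
 */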
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!dws->tx_dma)
		return NULL;

	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dws->dma_width;
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
	dws->tx_sgl.dma_address = dws->tx_dma;
	dws->tx_sgl.length = dws->len;

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				&dws->tx_sgl,
				1,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/* Don't dereference the descriptor if preparation failed */
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts, and the callback
 * for the rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
		return;
	dw_spi_xfer_done(dws);
}
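
/*
 * Configure the rx channel for a peripheral-to-memory transfer and build a
 * single-entry scatterlist descriptor for it. Returns NULL when the current
 * transfer has no rx buffer mapped for DMA.
 */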
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!dws->rx_dma)
		return NULL;

	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dws->dma_width;
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
	dws->rx_sgl.dma_address = dws->rx_dma;
	dws->rx_sgl.length = dws->len;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				&dws->rx_sgl,
				1,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/* Don't dereference the descriptor if preparation failed */
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}
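
/*
 * Program the rx/tx FIFO DMA request thresholds and enable the DMA handshake
 * signals needed for this transfer. The controller has to be disabled while
 * these registers are written.
 */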
static void dw_spi_dma_setup(struct dw_spi *dws)
{
	u16 dma_ctrl = 0;

	spi_enable_chip(dws, 0);
	dw_writew(dws, DW_SPI_DMARDLR, 0xf);
	dw_writew(dws, DW_SPI_DMATDLR, 0x10);
	if (dws->tx_dma)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (dws->rx_dma)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
	spi_enable_chip(dws, 1);
}
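
/*
 * Start one DMA transfer: reprogram the DMA registers when the chip select
 * changed, prepare both descriptors, and submit the rx side before the tx
 * side so the receiver is ready before data starts being clocked out.
 */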
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* 1. setup DMA related registers */
	if (cs_change)
		dw_spi_dma_setup(dws);

	/* 2. Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws);

	/* 3. Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws);

	/* rx must be started before tx due to spi instinct */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}
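
/* Hook the MID-specific DMA helpers into the generic DW SPI core */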
static struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_transfer	= mid_spi_dma_transfer,
};
#endif	/* CONFIG_SPI_DW_MID_DMA */

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100m */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8
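
/*
 * Derive the controller's maximum frequency from the clock divider in the
 * MRST clock control unit and, when DMA support is built in, allocate the
 * private slave data and install the MID DMA operations.
 */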
int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
	if (!dws->dma_priv)
		return -ENOMEM;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}