// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define TX_BUSY		1

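/*
 * Filter callback for dma_request_channel(): accept a channel only if it
 * belongs to the DMA device described by the dw_dma_slave data passed in
 * via @param, and attach that slave data to the channel.
 */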
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

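/*
 * Request the Rx/Tx channels of the Intel Medfield DMA controller
 * (PCI device 0x0827) and attach them to the SPI controller.
 */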
static int mid_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave slave = {
		.src_id = 0,
		.dst_id = 0
	};
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get pci device for DMA controller, currently it could only
	 * be the DMA controller of Medfield
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	slave.dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, &slave);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	slave.dst_id = 1;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, &slave);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

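/*
 * Same as above, but the channels are looked up by the "rx"/"tx" names the
 * platform (DT/ACPI) associates with the device instead of a hardwired
 * PCI DMA controller.
 */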
static int mid_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	return 0;
}

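/*
 * Terminate any ongoing transfers, release both channels and disable the
 * controller's DMA handshaking interface.
 */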
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

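/*
 * Interrupt handler installed for DMA mode: any unmasked interrupt at this
 * point indicates a FIFO overrun or underrun, so reset the chip, mark the
 * current message as failed and wake up the transfer waiter.
 */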
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

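/* Use DMA only for transfers which don't fit entirely into the FIFO. */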
static bool mid_spi_can_dma(struct spi_controller *master,
			    struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

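/* Translate the transfer word size in bytes into the matching dmaengine bus width. */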
static enum dma_slave_buswidth convert_dma_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

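/*
 * Wait until the Tx/Rx completion callbacks (or the error handler) signal
 * the end of the DMA transaction. The timeout is the nominal transfer time
 * at the effective bus speed, doubled, plus 200 ms of slack.
 */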
static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

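/*
 * The Tx DMA completion may be signalled while data is still sitting in the
 * Tx FIFO. Poll the TFE status flag, delaying roughly as long as it takes
 * to shift out the words still queued, before considering the Tx side done.
 */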
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts, callback for tx
 * channel will clear a corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

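/*
 * Set up the Tx channel for memory-to-device transfers targeting the
 * controller's data register and build a descriptor over the transfer's Tx
 * scatter-gather list, completed by dw_spi_dma_tx_done().
 */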
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->n_bytes);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that DMA engine is still doing the data fetching, but
	 * if it is, let's give it some reasonable time. The timeout calculation
	 * is based on the synchronous APB/SSI reference clock rate, on a
	 * number of data entries left in the Rx FIFO, times a number of clock
	 * periods normally needed for a single APB read/write transaction
	 * without PREADY signal utilized (which is true for the DW APB SSI
	 * controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts, callback for rx
 * channel will clear a corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

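/*
 * Set up the Rx channel for device-to-memory transfers sourced from the
 * controller's data register and build a descriptor over the transfer's Rx
 * scatter-gather list, completed by dw_spi_dma_rx_done().
 */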
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->n_bytes);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

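/*
 * Program the DMA request levels, enable Tx and/or Rx DMA handshaking and
 * unmask the FIFO error interrupts for the directions used by this transfer,
 * then install dma_transfer() as the interrupt handler.
 */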
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf) {
		dma_ctrl |= SPI_DMA_TDMAE;
		imr |= SPI_INT_TXOI;
	}
	if (xfer->rx_buf) {
		dma_ctrl |= SPI_DMA_RDMAE;
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	}
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dma_transfer;

	return 0;
}

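/*
 * Submit the prepared descriptors (Rx first, then Tx), wait for the DMA
 * engines to finish and then make sure both FIFOs are actually drained
 * before reporting the transfer as complete.
 */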
static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/* rx must be started before tx due to spi instinct */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}

	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

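/*
 * The Medfield variant gets its channels from the dedicated PCI DMA
 * controller; the generic variant relies on platform-described slave
 * channels. Both share the same transfer path.
 */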
static const struct dw_spi_dma_ops mfld_dma_ops = {
	.dma_init	= mid_spi_dma_init_mfld,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &mfld_dma_ops;
}

static const struct dw_spi_dma_ops generic_dma_ops = {
	.dma_init	= mid_spi_dma_init_generic,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_generic(struct dw_spi *dws)
{
	dws->dma_ops = &generic_dma_ops;
}
#else	/* CONFIG_SPI_DW_MID_DMA */
static inline void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws) {}
static inline void dw_spi_mid_setup_dma_generic(struct dw_spi *dws) {}
#endif

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100m */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

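/*
 * Read the SPI clock divider back from the MRST Clock Control Unit to derive
 * the controller's maximum operating frequency, then set up the Medfield DMA.
 */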
int dw_spi_mid_init_mfld ( struct dw_spi * dws )
2010-12-24 13:59:11 +08:00
{
2011-09-20 11:06:17 -07:00
void __iomem * clk_reg ;
u32 clk_cdiv ;
2010-12-24 13:59:11 +08:00
2020-01-06 09:43:50 +01:00
clk_reg = ioremap ( MRST_CLK_SPI_REG , 16 ) ;
2010-12-24 13:59:11 +08:00
if ( ! clk_reg )
return - ENOMEM ;
2015-01-22 17:59:34 +02:00
/* Get SPI controller operating freq info */
clk_cdiv = readl ( clk_reg + dws - > bus_num * sizeof ( u32 ) ) ;
clk_cdiv & = CLK_SPI_CDIV_MASK ;
clk_cdiv > > = CLK_SPI_CDIV_OFFSET ;
2010-12-24 13:59:11 +08:00
dws - > max_freq = MRST_SPI_CLK_BASE / ( clk_cdiv + 1 ) ;
2015-01-22 17:59:34 +02:00
2010-12-24 13:59:11 +08:00
iounmap ( clk_reg ) ;
2020-05-05 21:06:13 +08:00
/* Register hook to configure CTRLR0 */
dws - > update_cr0 = dw_spi_update_cr0 ;
2020-05-06 18:30:23 +03:00
dw_spi_mid_setup_dma_mfld ( dws ) ;
2010-12-24 13:59:11 +08:00
return 0 ;
}
int dw_spi_mid_init_generic(struct dw_spi *dws)
{
	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_mid_setup_dma_generic(dws);
	return 0;
}