/*
 * Driver for Cadence QSPI Controller
 *
 * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		16

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
struct cqspi_st;

struct cqspi_flash_pdata {
	struct spi_nor	nor;
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		inst_width;
	u8		addr_width;
	u8		data_width;
	u8		cs;
	bool		registered;
	bool		use_direct_mode;
};

struct cqspi_st {
	struct platform_device	*pdev;

	struct clk		*clk;
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;
	struct mutex		bus_mutex;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	int			current_page_size;
	int			current_erase_size;
	int			current_addr_width;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
/* Operation timeout value */
#define CQSPI_TIMEOUT_MS			500
#define CQSPI_READ_TIMEOUT_MS			10

/* Instruction type */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4
#define CQSPI_DUMMY_CLKS_MAX			31

#define CQSPI_STIG_DATA_LEN_MAX			8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
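
/*
 * Poll @reg until all bits in @mask are set (or, when @clear is true, all
 * cleared), giving up with -ETIMEDOUT after CQSPI_TIMEOUT_MS.
 */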
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
{
	unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	u32 val;

	while (1) {
		val = readl(reg);
		if (clear)
			val = ~val;
		val &= mask;

		if (val == mask)
			return 0;

		if (time_after(jiffies, end))
			return -ETIMEDOUT;
	}
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
}
static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}
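
/*
 * Pack the instruction, address and data transfer widths of the current
 * flash into the RD_INSTR register layout; write setup reuses the same
 * encoding when it programs that register.
 */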
static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	u32 rdreg = 0;

	rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, still in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}
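
/*
 * Run one STIG (software-triggered instruction generator) command: write
 * the composed CMDCTRL value, set the execute bit, then poll the
 * in-progress bit and finally the controller idle flag.
 */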
static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Polling for completion. */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}
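
/*
 * STIG read: fetch up to CQSPI_STIG_DATA_LEN_MAX (8) bytes of register
 * data through the CMDREADDATALOWER/UPPER registers, four bytes each.
 */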
static int cqspi_command_read(struct spi_nor *nor,
			      const u8 *txbuf, const unsigned n_tx,
			      u8 *rxbuf, const unsigned n_rx)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int read_len;
	int status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	return 0;
}
static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
			       const u8 *txbuf, const unsigned n_tx)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	unsigned int data;
	u32 write_len;
	int ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(nor->dev,
			"Invalid input argument, cmdlen %d txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}
	ret = cqspi_exec_flash_cmd(cqspi, reg);
	return ret;
}
static int cqspi_command_write_addr(struct spi_nor *nor,
				    const u8 opcode, const unsigned int addr)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
	reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
	reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

	writel(addr, reg_base + CQSPI_REG_CMDADDRESS);

	return cqspi_exec_flash_cmd(cqspi, reg);
}
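
/*
 * Program the read opcode, transfer widths, dummy cycles and address width
 * shared by the indirect and direct read paths. When eight or more dummy
 * clocks are requested, a 0xFF mode byte is emitted (its eight clocks are
 * subtracted from the dummy count except in quad instruction mode) so the
 * flash cannot latch an XIP entry pattern.
 */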
static int cqspi_read_setup(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;

	reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(nor, nor->read_opcode);

	/* Setup dummy clock cycles */
	dummy_clk = nor->read_dummy;
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		dummy_clk = CQSPI_DUMMY_CLKS_MAX;

	if (dummy_clk / 8) {
		reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
		/* Set mode bits high to ensure chip doesn't enter XIP */
		writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);

		/* Need to subtract the mode byte (8 clocks). */
		if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
			dummy_clk -= 8;

		if (dummy_clk)
			reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
			       << CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->addr_width - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);

	return 0;
}
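
/*
 * Indirect read: the controller drains flash data into its SRAM FIFO and
 * raises watermark/SRAM-full/completion interrupts; each wakeup below
 * copies whatever whole words the FIFO holds out through the AHB trigger
 * window, with a final partial word handled separately.
 */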
static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
				       loff_t from_addr, const size_t n_rx)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(nor->dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
	if (ret) {
		dev_err(nor->dev,
			"Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}
static int cqspi_write_setup(struct spi_nor *nor)
{
	unsigned int reg;
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;

	/* Set opcode. */
	reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(nor, nor->program_opcode);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->addr_width - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}
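
/*
 * Indirect write: feed the controller's write FIFO through the AHB trigger
 * window in page-sized chunks, waiting for a completion interrupt between
 * chunks. Trailing bytes of a partial word are padded with 0xFF (the
 * erased-flash value) before the final 32-bit store.
 */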
static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
					const u8 *txbuf, const size_t n_tx)
{
	const unsigned int page_size = nor->page_size;
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of cycles of
	 * QSPI_REF_CLK delay is required for the above bit to
	 * be internally synchronized by the QSPI module. Provide 5
	 * cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining > page_size ? page_size : remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(nor->dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(nor->dev,
			"Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}
static void cqspi_chipselect(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
	       << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}
static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	unsigned int reg;

	/* configure page size and block size. */
	reg = readl(iobase + CQSPI_REG_SIZE);
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (nor->addr_width - 1);
	writel(reg, iobase + CQSPI_REG_SIZE);

	/* configure the chip select */
	cqspi_chipselect(nor);

	/* Store the new configuration of the controller */
	cqspi->current_page_size = nor->page_size;
	cqspi->current_erase_size = nor->mtd.erasesize;
	cqspi->current_addr_width = nor->addr_width;
}
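
/*
 * Convert a delay in nanoseconds to reference-clock ticks, rounding up.
 * E.g. with a 500 MHz ref clock, 50 ns -> DIV_ROUND_UP(500000 kHz * 50,
 * 1000000) = 25 ticks.
 */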
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}
static void cqspi_delay(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}
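
/*
 * The baud field encodes an even divisor: SCLK = ref_clk / (2 * (div + 1)).
 * E.g. a 400 MHz ref clock with a 50 MHz target gives div = 3, i.e.
 * divide-by-8. Rounding up keeps SCLK at or below the requested rate.
 */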
static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}
static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}
static void cqspi_configure(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const unsigned int sclk = f_pdata->clk_rate;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if ((cqspi->current_page_size != nor->page_size) ||
	    (cqspi->current_erase_size != nor->mtd.erasesize) ||
	    (cqspi->current_addr_width != nor->addr_width))
		switch_cs = 1;

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_configure_cs_and_sizes(nor);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(nor);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}
static int cqspi_set_protocol(struct spi_nor *nor, const int read)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;

	f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
	f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
	f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;

	if (read) {
		switch (nor->read_proto) {
		case SNOR_PROTO_1_1_1:
			f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
			break;
		case SNOR_PROTO_1_1_2:
			f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
			break;
		case SNOR_PROTO_1_1_4:
			f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
			break;
		default:
			return -EINVAL;
		}
	}

	cqspi_configure(nor);

	return 0;
}
static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
			   size_t len, const u_char *buf)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (ret)
		return ret;

	ret = cqspi_write_setup(nor);
	if (ret)
		return ret;

	if (f_pdata->use_direct_mode) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		ret = cqspi_wait_idle(cqspi);
	} else {
		ret = cqspi_indirect_write_execute(nor, to, buf, len);
	}
	if (ret)
		return ret;

	return len;
}
static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}
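
/*
 * Direct-mode read: if no memcpy DMA channel is available or the buffer is
 * not DMA-able (e.g. vmalloc'ed), fall back to a CPU copy from the
 * memory-mapped window; otherwise bounce the data through a dmaengine
 * memcpy with a timeout scaled to the transfer length.
 */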
static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
				     loff_t from, size_t len)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(nor->dev, dma_dst)) {
		dev_err(nor->dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(nor->dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(nor->dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(len))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
			  size_t len, u_char *buf)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	int ret;

	ret = cqspi_set_protocol(nor, 1);
	if (ret)
		return ret;

	ret = cqspi_read_setup(nor);
	if (ret)
		return ret;

	if (f_pdata->use_direct_mode)
		ret = cqspi_direct_read_execute(nor, buf, from, len);
	else
		ret = cqspi_indirect_read_execute(nor, buf, from, len);
	if (ret)
		return ret;

	return len;
}
static int cqspi_erase(struct spi_nor *nor, loff_t offs)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (ret)
		return ret;

	/* Send write enable, then erase commands. */
	ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
	if (ret)
		return ret;

	/* Set up command buffer. */
	ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
	if (ret)
		return ret;

	return 0;
}
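
/* Serialize flash accesses: the bus mutex is held from prepare to unprepare. */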
static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;

	mutex_lock(&cqspi->bus_mutex);

	return 0;
}

static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;

	mutex_unlock(&cqspi->bus_mutex);
}
static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (!ret)
		ret = cqspi_command_read(nor, &opcode, 1, buf, len);

	return ret;
}

static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (!ret)
		ret = cqspi_command_write(nor, opcode, buf, len);

	return ret;
}
static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}
static int cqspi_of_get_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(&pdev->dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(&pdev->dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	return 0;
}
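
/*
 * One-time controller setup: no address remap, all interrupts masked, the
 * SRAM FIFO split evenly between reads and writes, watermarks programmed,
 * and the direct access controller enabled for memory-mapped I/O.
 */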
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Enable Direct Access Controller */
	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);
}
static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
		cqspi->rx_chan = NULL;
	}
	init_completion(&cqspi->rx_dma_complete);
}
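
/*
 * Register one spi_nor/mtd device per flash child node. Flashes that fit
 * entirely inside the AHB window are switched to direct (memory-mapped)
 * mode, which also triggers a one-time request for a memcpy DMA channel.
 */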
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_READ_1_1_2 |
			SNOR_HWCAPS_READ_1_1_4 |
			SNOR_HWCAPS_PP,
	};
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct cqspi_flash_pdata *f_pdata;
	struct spi_nor *nor;
	struct mtd_info *mtd;
	unsigned int cs;
	int i, ret;

	/* Get flash device data */
	for_each_available_child_of_node(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			goto err;
		}

		if (cs >= CQSPI_MAX_CHIPSELECT) {
			ret = -EINVAL;
			dev_err(dev, "Chip select %d out of range.\n", cs);
			goto err;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret)
			goto err;

		nor = &f_pdata->nor;
		mtd = &nor->mtd;

		mtd->priv = nor;

		nor->dev = dev;
		spi_nor_set_flash_node(nor, np);
		nor->priv = f_pdata;

		nor->read_reg = cqspi_read_reg;
		nor->write_reg = cqspi_write_reg;
		nor->read = cqspi_read;
		nor->write = cqspi_write;
		nor->erase = cqspi_erase;
		nor->prepare = cqspi_prep;
		nor->unprepare = cqspi_unprep;

		mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
					   dev_name(dev), cs);
		if (!mtd->name) {
			ret = -ENOMEM;
			goto err;
		}

		ret = spi_nor_scan(nor, NULL, &hwcaps);
		if (ret)
			goto err;

		ret = mtd_device_register(mtd, NULL, 0);
		if (ret)
			goto err;

		f_pdata->registered = true;

		if (mtd->size <= cqspi->ahb_size) {
			f_pdata->use_direct_mode = true;
			dev_dbg(nor->dev, "using direct mode for %s\n",
				mtd->name);

			if (!cqspi->rx_chan)
				cqspi_request_mmap_dma(cqspi);
		}
	}

	return 0;

err:
	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
		if (cqspi->f_pdata[i].registered)
			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
	return ret;
}
static int cqspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct cqspi_st *cqspi;
	struct resource *res;
	struct resource *res_ahb;
	unsigned long data;
	int ret;
	int irq;

	cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
	if (!cqspi)
		return -ENOMEM;

	mutex_init(&cqspi->bus_mutex);
	cqspi->pdev = pdev;
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(pdev);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		return PTR_ERR(cqspi->clk);
	}

	/* Obtain and remap controller address. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cqspi->iobase = devm_ioremap_resource(dev, res);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		return PTR_ERR(cqspi->iobase);
	}

	/* Obtain and remap AHB address. */
	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		return PTR_ERR(cqspi->ahb_base);
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot obtain IRQ.\n");
		return -ENXIO;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	data = (unsigned long)of_device_get_match_data(dev);
	if (data & CQSPI_NEEDS_WR_DELAY)
		cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
						   cqspi->master_ref_clk_hz);

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_irq_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	ret = cqspi_setup_flash(cqspi, np);
	if (ret) {
		dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
		goto probe_setup_failed;
	}

	return ret;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
probe_irq_failed:
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
		if (cqspi->f_pdata[i].registered)
			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);

	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	clk_disable_unprepare(cqspi->clk);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	return 0;
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 1);
	return 0;
}

static const struct dev_pm_ops cqspi__dev_pm_ops = {
	.suspend = cqspi_suspend,
	.resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
#else
#define CQSPI_DEV_PM_OPS	NULL
#endif
static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = (void *)0,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = (void *)CQSPI_NEEDS_WR_DELAY,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = CQSPI_DEV_PM_OPS,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");